cpufreq: Remove unnecessary use of policy->shared_type
[deliverable/linux.git] / drivers / cpufreq / cpufreq.c
CommitLineData
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 *
c32b6b8e 7 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 8 * Added handling for CPU hotplug
8ff69732
DJ
9 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
10 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 11 *
1da177e4
LT
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 *
16 */
17
db701151
VK
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
1da177e4
LT
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/init.h>
23#include <linux/notifier.h>
24#include <linux/cpufreq.h>
25#include <linux/delay.h>
26#include <linux/interrupt.h>
27#include <linux/spinlock.h>
28#include <linux/device.h>
29#include <linux/slab.h>
30#include <linux/cpu.h>
31#include <linux/completion.h>
3fc54d37 32#include <linux/mutex.h>
e00e56df 33#include <linux/syscore_ops.h>
1da177e4 34
6f4f2723
TR
35#include <trace/events/power.h>
36
1da177e4 37/**
cd878479 38 * The "cpufreq driver" - the arch- or hardware-dependent low
1da177e4
LT
39 * level driver of CPUFreq support, and its spinlock. This lock
40 * also protects the cpufreq_cpu_data array.
41 */
7d5e350f 42static struct cpufreq_driver *cpufreq_driver;
7a6aedfa 43static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
084f3493
TR
44#ifdef CONFIG_HOTPLUG_CPU
45/* This one keeps track of the previously set governor of a removed CPU */
e77b89f1 46static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
084f3493 47#endif
1da177e4
LT
48static DEFINE_SPINLOCK(cpufreq_driver_lock);
49
5a01f2e8
VP
50/*
51 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
52 * all cpufreq/hotplug/workqueue/etc related lock issues.
53 *
54 * The rules for this semaphore:
55 * - Any routine that wants to read from the policy structure will
56 * do a down_read on this semaphore.
57 * - Any routine that will write to the policy structure and/or may take away
58 * the policy altogether (eg. CPU hotplug), will hold this lock in write
59 * mode before doing so.
60 *
61 * Additional rules:
62 * - All holders of the lock should check to make sure that the CPU they
63 * are concerned with are online after they get the lock.
64 * - Governor routines that can be called in cpufreq hotplug path should not
65 * take this sem as top level hotplug notifier handler takes this.
395913d0
MD
66 * - Lock should not be held across
67 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
5a01f2e8 68 */
f1625066 69static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
5a01f2e8
VP
70static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
71
/*
 * lock_policy_rwsem - generate lock_policy_rwsem_read()/write().
 *
 * Each CPU maps to a "policy cpu" (per_cpu(cpufreq_policy_cpu)); the
 * rwsem of that policy cpu protects the whole shared policy.  The
 * generated helper returns 0 with the semaphore held, or -1 (semaphore
 * already released) when the CPU went offline while we were blocked.
 */
#define lock_policy_rwsem(mode, cpu)					\
static int lock_policy_rwsem_##mode					\
(int cpu)								\
{									\
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
	BUG_ON(policy_cpu == -1);					\
	down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
	if (unlikely(!cpu_online(cpu))) {				\
		up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));	\
		return -1;						\
	}								\
									\
	return 0;							\
}

lock_policy_rwsem(read, cpu);

lock_policy_rwsem(write, cpu);
5a01f2e8 90
/* Drop the read side of the policy rwsem protecting @cpu's policy. */
static void unlock_policy_rwsem_read(int cpu)
{
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
	BUG_ON(policy_cpu == -1);	/* must have been set before locking */
	up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
}

/* Drop the write side of the policy rwsem protecting @cpu's policy. */
static void unlock_policy_rwsem_write(int cpu)
{
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
	BUG_ON(policy_cpu == -1);
	up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
}
5a01f2e8
VP
104
105
1da177e4 106/* internal prototypes */
29464f28
DJ
107static int __cpufreq_governor(struct cpufreq_policy *policy,
108 unsigned int event);
5a01f2e8 109static unsigned int __cpufreq_get(unsigned int cpu);
65f27f38 110static void handle_update(struct work_struct *work);
1da177e4
LT
111
112/**
32ee8c3e
DJ
113 * Two notifier lists: the "policy" list is involved in the
114 * validation process for a new CPU frequency policy; the
1da177e4
LT
115 * "transition" list for kernel code that needs to handle
116 * changes to devices when the CPU clock speed changes.
117 * The mutex locks both lists.
118 */
e041c683 119static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
b4dfdbb3 120static struct srcu_notifier_head cpufreq_transition_notifier_list;
1da177e4 121
/* Set once the SRCU transition notifier head is ready; checked elsewhere
 * to catch too-early notifier registration — not visible in this chunk. */
static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
/* pure_initcall: runs before any driver can register a notifier */
pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4 130
/* Non-zero once disable_cpufreq() has run; most public entry points
 * check this and turn into no-ops. */
static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
/* One-way switch: there is no way to re-enable cpufreq afterwards. */
void disable_cpufreq(void)
{
	off = 1;
}
1da177e4 140static LIST_HEAD(cpufreq_governor_list);
29464f28 141static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 142
/*
 * __cpufreq_cpu_get - look up @cpu's policy and take references.
 * @cpu: CPU number
 * @sysfs: true when called from the sysfs show/store path, where the
 *	   sysfs core already holds a kobject reference, so only the
 *	   driver module reference is taken.
 *
 * All checks run under cpufreq_driver_lock so the driver cannot be
 * unregistered mid-lookup.  Returns the policy with references held,
 * or NULL when the CPU is out of range, no driver is registered, or
 * no policy exists for the CPU.  Undone by __cpufreq_cpu_put().
 */
static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
{
	struct cpufreq_policy *data;
	unsigned long flags;

	if (cpu >= nr_cpu_ids)
		goto err_out;

	/* get the cpufreq driver */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);

	if (!cpufreq_driver)
		goto err_out_unlock;

	if (!try_module_get(cpufreq_driver->owner))
		goto err_out_unlock;


	/* get the CPU */
	data = per_cpu(cpufreq_cpu_data, cpu);

	if (!data)
		goto err_out_put_module;

	/* sysfs callers already own a kobject ref; don't take another */
	if (!sysfs && !kobject_get(&data->kobj))
		goto err_out_put_module;

	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return data;

err_out_put_module:
	module_put(cpufreq_driver->owner);
err_out_unlock:
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
	return NULL;
}
a9144436
SB
180
181struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
182{
d5aaffa9
DB
183 if (cpufreq_disabled())
184 return NULL;
185
a9144436
SB
186 return __cpufreq_cpu_get(cpu, false);
187}
1da177e4
LT
188EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
189
a9144436
SB
/* sysfs variant of cpufreq_cpu_get(): skips the extra kobject ref. */
static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
{
	return __cpufreq_cpu_get(cpu, true);
}

/*
 * Release the references taken by __cpufreq_cpu_get().  @sysfs mirrors
 * the flag used on the get side: sysfs callers never took a kobject ref.
 */
static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
{
	if (!sysfs)
		kobject_put(&data->kobj);
	module_put(cpufreq_driver->owner);
}

/* Public counterpart of cpufreq_cpu_get(). */
void cpufreq_cpu_put(struct cpufreq_policy *data)
{
	if (cpufreq_disabled())
		return;

	__cpufreq_cpu_put(data, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);

/* Counterpart of cpufreq_cpu_get_sysfs(). */
static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
{
	__cpufreq_cpu_put(data, true);
}
1da177e4 215
1da177e4
LT
216/*********************************************************************
217 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
218 *********************************************************************/
219
220/**
221 * adjust_jiffies - adjust the system "loops_per_jiffy"
222 *
223 * This function alters the system "loops_per_jiffy" for the clock
224 * speed change. Note that loops_per_jiffy cannot be updated on SMP
32ee8c3e 225 * systems as each CPU might be scaled differently. So, use the arch
1da177e4
LT
226 * per-CPU loops_per_jiffy value wherever possible.
227 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;		/* loops_per_jiffy at the reference freq */
static unsigned int l_p_j_ref_freq;	/* frequency the reference was taken at */

/*
 * Rescale the global loops_per_jiffy after a frequency change.  Only
 * done on UP: on SMP each CPU may scale differently, so the stub below
 * is used instead.  Drivers flagged CPUFREQ_CONST_LOOPS have delay
 * loops independent of CPU speed and are skipped.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	/* first call: remember the current value/frequency as reference */
	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; "
			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu "
			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
#else
/* SMP: per-CPU loops_per_jiffy is handled by the architecture. */
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
257
258
259/**
e4472cb3
DJ
260 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
261 * on frequency transition.
1da177e4 262 *
e4472cb3
DJ
263 * This function calls the transition notifiers and the "adjust_jiffies"
264 * function. It is called twice on all CPU frequency changes that have
32ee8c3e 265 * external effects.
1da177e4
LT
266 */
void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
{
	struct cpufreq_policy *policy;

	/* SRCU notifiers may sleep; must not be called with IRQs off */
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		state, freqs->new);

	policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is"
					" %u, cpufreq assumed %u kHz.\n",
					freqs->old, policy->cur);
				/* trust the core's view over the driver's */
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
			(unsigned long)freqs->cpu);
		trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		/* keep the core's idea of the current frequency in sync */
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
316
317
318
319/*********************************************************************
320 * SYSFS INTERFACE *
321 *********************************************************************/
322
3bcb09a3
JF
323static struct cpufreq_governor *__find_governor(const char *str_governor)
324{
325 struct cpufreq_governor *t;
326
327 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
29464f28 328 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
3bcb09a3
JF
329 return t;
330
331 return NULL;
332}
333
1da177e4
LT
/**
 * cpufreq_parse_governor - parse a governor string
 * @str_governor: name supplied by the user (e.g. via sysfs)
 * @policy: out — CPUFREQ_POLICY_* constant, used by setpolicy drivers
 * @governor: out — governor object, used by drivers with ->target
 *
 * setpolicy drivers accept only "performance"/"powersave"; ->target
 * drivers search the registered governor list, loading the governor
 * module on demand.  Returns 0 on success, -EINVAL otherwise.
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (cpufreq_driver->target) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);

		if (t == NULL) {
			int ret;

			/* drop the mutex around the sleeping module load;
			 * the loaded governor presumably registers itself
			 * under this mutex — re-scan the list afterwards */
			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = __find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}
1da177e4
LT
382
383
1da177e4 384/**
e08f5f5b
GS
385 * cpufreq_per_cpu_attr_read() / show_##file_name() -
386 * print out cpufreq information
1da177e4
LT
387 *
388 * Write out information from cpufreq_driver->policy[cpu]; object must be
389 * "unsigned int".
390 */
391
32ee8c3e
DJ
/* Generate a sysfs show handler that prints one unsigned int policy
 * field ("%u\n"). */
#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);

static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 *
 * Parses one unsigned int from @buf into a copy of the current policy,
 * re-applies it via __cpufreq_set_policy(), and records the resulting
 * value in user_policy so the user's request survives policy updates.
 */
#define store_one(file_name, object)			\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	unsigned int ret;						\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = __cpufreq_set_policy(policy, &new_policy);		\
	policy->user_policy.object = policy->object;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
1da177e4
LT
435
436/**
437 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
438 */
905d77cd
DJ
439static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
440 char *buf)
1da177e4 441{
5a01f2e8 442 unsigned int cur_freq = __cpufreq_get(policy->cpu);
1da177e4
LT
443 if (!cur_freq)
444 return sprintf(buf, "<unknown>");
445 return sprintf(buf, "%u\n", cur_freq);
446}
447
448
449/**
450 * show_scaling_governor - show the current policy for the specified CPU
451 */
905d77cd 452static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
1da177e4 453{
29464f28 454 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
1da177e4
LT
455 return sprintf(buf, "powersave\n");
456 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
457 return sprintf(buf, "performance\n");
458 else if (policy->governor)
4b972f0b 459 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
29464f28 460 policy->governor->name);
1da177e4
LT
461 return -EINVAL;
462}
463
464
/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int ret;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	/* %15s leaves room for the terminating NUL in str_governor[16] */
	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	/* Do not use cpufreq_set_policy here or the user_policy.max
	   will be wrongly overridden */
	ret = __cpufreq_set_policy(policy, &new_policy);

	/* remember what the user asked for, even if set_policy failed */
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}
499
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	/* setpolicy drivers have no governor list — only the two fixed
	 * policies are selectable */
	if (!cpufreq_driver->target) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		/* stop before a further name could overflow the page */
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}
e8628dd0 532
/* Render @mask as a space-separated list of CPU numbers into @buf. */
static ssize_t show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		/* leave room for the trailing "\n" and terminator */
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}

/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->cpus, buf);
}

/* Forward a user-requested frequency to governors implementing the
 * ->store_setspeed hook; -EINVAL when the governor has no such hook. */
static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}
583
584static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
585{
879000f9 586 if (!policy->governor || !policy->governor->show_setspeed)
9e76988e
VP
587 return sprintf(buf, "<unsupported>\n");
588
589 return policy->governor->show_setspeed(policy, buf);
590}
1da177e4 591
e2f74f35 592/**
8bf1ac72 593 * show_bios_limit - show the current cpufreq HW/BIOS limitation
e2f74f35
TR
594 */
595static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
596{
597 unsigned int limit;
598 int ret;
599 if (cpufreq_driver->bios_limit) {
600 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
601 if (!ret)
602 return sprintf(buf, "%u\n", limit);
603 }
604 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
605}
606
6dad2a29
BP
607cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
608cpufreq_freq_attr_ro(cpuinfo_min_freq);
609cpufreq_freq_attr_ro(cpuinfo_max_freq);
610cpufreq_freq_attr_ro(cpuinfo_transition_latency);
611cpufreq_freq_attr_ro(scaling_available_governors);
612cpufreq_freq_attr_ro(scaling_driver);
613cpufreq_freq_attr_ro(scaling_cur_freq);
614cpufreq_freq_attr_ro(bios_limit);
615cpufreq_freq_attr_ro(related_cpus);
616cpufreq_freq_attr_ro(affected_cpus);
617cpufreq_freq_attr_rw(scaling_min_freq);
618cpufreq_freq_attr_rw(scaling_max_freq);
619cpufreq_freq_attr_rw(scaling_governor);
620cpufreq_freq_attr_rw(scaling_setspeed);
1da177e4 621
905d77cd 622static struct attribute *default_attrs[] = {
1da177e4
LT
623 &cpuinfo_min_freq.attr,
624 &cpuinfo_max_freq.attr,
ed129784 625 &cpuinfo_transition_latency.attr,
1da177e4
LT
626 &scaling_min_freq.attr,
627 &scaling_max_freq.attr,
628 &affected_cpus.attr,
e8628dd0 629 &related_cpus.attr,
1da177e4
LT
630 &scaling_governor.attr,
631 &scaling_driver.attr,
632 &scaling_available_governors.attr,
9e76988e 633 &scaling_setspeed.attr,
1da177e4
LT
634 NULL
635};
636
8aa84ad8
TR
637struct kobject *cpufreq_global_kobject;
638EXPORT_SYMBOL(cpufreq_global_kobject);
639
29464f28
DJ
640#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
641#define to_attr(a) container_of(a, struct freq_attr, attr)
1da177e4 642
/*
 * show - sysfs read dispatcher for all per-policy attributes.
 *
 * Takes the driver-module reference (the kobject ref is already held by
 * the sysfs core, hence the _sysfs get variant) and the policy rwsem in
 * read mode, then calls the attribute's ->show hook.
 */
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get_sysfs(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_read(policy->cpu) < 0)
		goto fail;

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	unlock_policy_rwsem_read(policy->cpu);
fail:
	cpufreq_cpu_put_sysfs(policy);
no_policy:
	return ret;
}

/*
 * store - sysfs write dispatcher; like show() but holds the policy
 * rwsem in write mode around the attribute's ->store hook.
 */
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get_sysfs(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_write(policy->cpu) < 0)
		goto fail;

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	unlock_policy_rwsem_write(policy->cpu);
fail:
	cpufreq_cpu_put_sysfs(policy);
no_policy:
	return ret;
}

/*
 * cpufreq_sysfs_release - kobject release callback: the last reference
 * is gone, wake whoever waits on kobj_unregister to free the policy.
 */
static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show = show,
	.store = store,
};

/* kobject type backing each policy's /sys/.../cpufreq directory */
static struct kobj_type ktype_cpufreq = {
	.sysfs_ops = &sysfs_ops,
	.default_attrs = default_attrs,
	.release = cpufreq_sysfs_release,
};
709
/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(unsigned int cpu,
				   struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpufreq_policy *managed_policy;
		struct device *cpu_dev;

		/* the owning CPU gets the real directory, not a link */
		if (j == cpu)
			continue;
		if (!cpu_online(j))
			continue;

		pr_debug("CPU %u already managed, adding link\n", j);
		/* NOTE(review): this ref is only dropped on failure;
		 * presumably it intentionally pins the policy while the
		 * symlink exists — confirm against the removal path */
		managed_policy = cpufreq_cpu_get(cpu);
		cpu_dev = get_cpu_device(j);
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret) {
			cpufreq_cpu_put(managed_policy);
			return ret;
		}
	}
	return ret;
}
738
cf3289d0
AC
/*
 * cpufreq_add_dev_interface - create sysfs for a new policy and apply
 * the default policy.
 *
 * Creates the policy kobject under @dev, adds driver and conditional
 * attributes, publishes the policy in the per-cpu pointers, symlinks
 * sibling CPUs, then runs __cpufreq_set_policy() with a copy of the
 * freshly initialized policy to start the governor.
 */
static int cpufreq_add_dev_interface(unsigned int cpu,
				     struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct cpufreq_policy new_policy;
	struct freq_attr **drv_attr;
	unsigned long flags;
	int ret = 0;
	unsigned int j;

	/* prepare interface data */
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   &dev->kobj, "cpufreq");
	if (ret)
		return ret;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_kobj_put;
		drv_attr++;
	}
	/* cpuinfo_cur_freq only exists when the hardware can be queried */
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->target) {
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			goto err_out_kobj_put;
	}

	/* publish the policy for every online CPU it manages */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus) {
		if (!cpu_online(j))
			continue;
		per_cpu(cpufreq_cpu_data, j) = policy;
		per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
	}
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_add_dev_symlink(cpu, policy);
	if (ret)
		goto err_out_kobj_put;

	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
	/* assure that the starting sequence is run in __cpufreq_set_policy */
	policy->governor = NULL;

	/* set default policy */
	ret = __cpufreq_set_policy(policy, &new_policy);
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret) {
		pr_debug("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
	return ret;

err_out_kobj_put:
	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);
	return ret;
}
813
fcf80582
VK
#ifdef CONFIG_HOTPLUG_CPU
/*
 * cpufreq_add_policy_cpu - attach a (re)plugged CPU to an existing
 * sibling policy instead of creating a new one.
 *
 * The governor is stopped, @cpu is added to policy->cpus, and the
 * governor restarted so it picks up the new member; finally a sysfs
 * link from the CPU device to the shared policy kobject is created.
 */
static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
				  struct device *dev)
{
	struct cpufreq_policy *policy;
	int ret = 0;
	unsigned long flags;

	policy = cpufreq_cpu_get(sibling);
	WARN_ON(!policy);

	per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;

	lock_policy_rwsem_write(cpu);

	__cpufreq_governor(policy, CPUFREQ_GOV_STOP);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	__cpufreq_governor(policy, CPUFREQ_GOV_START);
	__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

	unlock_policy_rwsem_write(cpu);

	/* on success the ref from cpufreq_cpu_get() is kept — presumably
	 * it pins the policy for this CPU until removal; only dropped on
	 * failure here */
	ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
	if (ret) {
		cpufreq_cpu_put(policy);
		return ret;
	}

	return 0;
}
#endif
1da177e4
LT
850
/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration concurrently
 * with with cpu hotplugging and all hell will break loose. Tried to clean this
 * mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int j, cpu = dev->id;
	int ret = -ENOMEM, found = 0;
	struct cpufreq_policy *policy;
	unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
	struct cpufreq_governor *gov;
	int sibling;
#endif

	if (cpu_is_offline(cpu))
		return 0;

	pr_debug("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/* Check if this cpu was hot-unplugged earlier and has siblings */
	for_each_online_cpu(sibling) {
		struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
		if (cp && cpumask_test_cpu(cpu, cp->related_cpus))
			return cpufreq_add_policy_cpu(cpu, sibling, dev);
	}
#endif
#endif

	/* pin the driver module for the duration of the registration */
	if (!try_module_get(cpufreq_driver->owner)) {
		ret = -EINVAL;
		goto module_out;
	}

	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
	if (!policy)
		goto nomem_out;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	policy->cpu = cpu;
	cpumask_copy(policy->cpus, cpumask_of(cpu));

	/* Initially set CPU itself as the policy_cpu */
	per_cpu(cpufreq_policy_cpu, cpu) = cpu;
	ret = (lock_policy_rwsem_write(cpu) < 0);
	WARN_ON(ret);

	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	/* Set governor before ->init, so that driver could check it */
#ifdef CONFIG_HOTPLUG_CPU
	/* inherit the governor from an online sibling sharing hardware */
	for_each_online_cpu(sibling) {
		struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
		if (cp && cp->governor &&
		    (cpumask_test_cpu(cpu, cp->related_cpus))) {
			policy->governor = cp->governor;
			found = 1;
			break;
		}
	}
#endif
	if (!found)
		policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto err_unlock_policy;
	}

	/* related cpus should atleast have policy->cpus */
	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

	/*
	 * affected cpus must always be the one, which are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	policy->user_policy.min = policy->min;
	policy->user_policy.max = policy->max;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

#ifdef CONFIG_HOTPLUG_CPU
	/* restore the governor this CPU used before it was hot-removed */
	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
	if (gov) {
		policy->governor = gov;
		pr_debug("Restoring governor %s for cpu %d\n",
		       policy->governor->name, cpu);
	}
#endif

	ret = cpufreq_add_dev_interface(cpu, policy, dev);
	if (ret)
		goto err_out_unregister;

	unlock_policy_rwsem_write(cpu);

	kobject_uevent(&policy->kobj, KOBJ_ADD);
	module_put(cpufreq_driver->owner);
	pr_debug("initialization complete\n");

	return 0;

err_out_unregister:
	/* unpublish before tearing the kobject down */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);

err_unlock_policy:
	unlock_policy_rwsem_write(cpu);
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);
nomem_out:
	module_put(cpufreq_driver->owner);
module_out:
	return ret;
}
1001
b8eed8af
VK
/*
 * update_policy_cpu - make @cpu the owner CPU of @policy
 *
 * Records the previous owner in policy->last_cpu, repoints the per-CPU
 * owner map for every online CPU in the policy's mask, and notifies
 * listeners (and the freq-table code) that the owner changed.
 */
static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int j;

	policy->last_cpu = policy->cpu;
	policy->cpu = cpu;

	for_each_cpu(j, policy->cpus) {
		/* offline CPUs are not managed here, skip them */
		if (!cpu_online(j))
			continue;
		per_cpu(cpufreq_policy_cpu, j) = cpu;
	}

#ifdef CONFIG_CPU_FREQ_TABLE
	cpufreq_frequency_table_update_policy_cpu(policy);
#endif
	/* let policy notifiers (e.g. stats) follow the owner change */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_UPDATE_POLICY_CPU, policy);
}
1da177e4
LT
1021
/**
 * __cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 * Caller should already have policy_rwsem in write mode for this CPU.
 * This routine frees the rwsem before returning.
 */
static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id, ret, cpus;
	unsigned long flags;
	struct cpufreq_policy *data;
	struct kobject *kobj;
	struct completion *cmp;
	struct device *cpu_dev;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	data = per_cpu(cpufreq_cpu_data, cpu);

	if (!data) {
		pr_debug("%s: No cpu_data found\n", __func__);
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		unlock_policy_rwsem_write(cpu);
		return -EINVAL;
	}

	/* stop the governor before we start tearing state down */
	if (cpufreq_driver->target)
		__cpufreq_governor(data, CPUFREQ_GOV_STOP);

#ifdef CONFIG_HOTPLUG_CPU
	/* remember the governor so it can be restored if the CPU returns */
	strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
			CPUFREQ_NAME_LEN);
#endif

	per_cpu(cpufreq_cpu_data, cpu) = NULL;
	cpus = cpumask_weight(data->cpus);
	cpumask_clear_cpu(cpu, data->cpus);

	/*
	 * If the CPU being removed owns the policy kobject and siblings
	 * remain, hand the sysfs directory over to the first sibling.
	 */
	if (unlikely((cpu == data->cpu) && (cpus > 1))) {
		/* first sibling now owns the new sysfs dir */
		cpu_dev = get_cpu_device(cpumask_first(data->cpus));
		sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
		ret = kobject_move(&data->kobj, &cpu_dev->kobj);
		if (ret) {
			pr_err("%s: Failed to move kobj: %d", __func__, ret);
			/* undo: put the CPU back and restore its symlink */
			cpumask_set_cpu(cpu, data->cpus);
			ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
					"cpufreq");
			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
			unlock_policy_rwsem_write(cpu);
			return -EINVAL;
		}

		update_policy_cpu(data, cpu_dev->id);
		pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
				__func__, cpu_dev->id, cpu);
	}

	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
	cpufreq_cpu_put(data);
	unlock_policy_rwsem_write(cpu);
	sysfs_remove_link(&dev->kobj, "cpufreq");

	/* If cpu is last user of policy, free policy */
	if (cpus == 1) {
		lock_policy_rwsem_write(cpu);
		kobj = &data->kobj;
		cmp = &data->kobj_unregister;
		unlock_policy_rwsem_write(cpu);
		kobject_put(kobj);

		/* we need to make sure that the underlying kobj is actually
		 * not referenced anymore by anybody before we proceed with
		 * unloading.
		 */
		pr_debug("waiting for dropping of refcount\n");
		wait_for_completion(cmp);
		pr_debug("wait complete\n");

		lock_policy_rwsem_write(cpu);
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(data);
		unlock_policy_rwsem_write(cpu);

		free_cpumask_var(data->related_cpus);
		free_cpumask_var(data->cpus);
		kfree(data);
	} else if (cpufreq_driver->target) {
		/* siblings remain: restart the governor on the policy */
		__cpufreq_governor(data, CPUFREQ_GOV_START);
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

	return 0;
}
1120
1121
8a25a2fd 1122static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
5a01f2e8 1123{
8a25a2fd 1124 unsigned int cpu = dev->id;
5a01f2e8 1125 int retval;
ec28297a
VP
1126
1127 if (cpu_is_offline(cpu))
1128 return 0;
1129
5a01f2e8
VP
1130 if (unlikely(lock_policy_rwsem_write(cpu)))
1131 BUG();
1132
8a25a2fd 1133 retval = __cpufreq_remove_dev(dev, sif);
5a01f2e8
VP
1134 return retval;
1135}
1136
1137
65f27f38 1138static void handle_update(struct work_struct *work)
1da177e4 1139{
65f27f38
DH
1140 struct cpufreq_policy *policy =
1141 container_of(work, struct cpufreq_policy, update);
1142 unsigned int cpu = policy->cpu;
2d06d8c4 1143 pr_debug("handle_update for cpu %u called\n", cpu);
1da177e4
LT
1144 cpufreq_update_policy(cpu);
1145}
1146
/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
 * @cpu: cpu number
 * @old_freq: CPU frequency the kernel thinks the CPU runs at
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to current frequency first, and need to clean up later.
 * So either call to cpufreq_update_policy() or schedule handle_update()).
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
				unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
	       "core thinks of %u, is %u kHz.\n", old_freq, new_freq);

	/* issue a synthetic PRE+POST transition so all listeners
	 * (loops-per-jiffy scaling, stats, ...) adopt the real frequency */
	freqs.cpu = cpu;
	freqs.old = old_freq;
	freqs.new = new_freq;
	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
1170
1171
32ee8c3e 1172/**
4ab70df4 1173 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
95235ca2
VP
1174 * @cpu: CPU number
1175 *
1176 * This is the last known freq, without actually getting it from the driver.
1177 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1178 */
1179unsigned int cpufreq_quick_get(unsigned int cpu)
1180{
1181 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
e08f5f5b 1182 unsigned int ret_freq = 0;
95235ca2
VP
1183
1184 if (policy) {
e08f5f5b 1185 ret_freq = policy->cur;
95235ca2
VP
1186 cpufreq_cpu_put(policy);
1187 }
1188
4d34a67d 1189 return ret_freq;
95235ca2
VP
1190}
1191EXPORT_SYMBOL(cpufreq_quick_get);
1192
3d737108
JB
1193/**
1194 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1195 * @cpu: CPU number
1196 *
1197 * Just return the max possible frequency for a given CPU.
1198 */
1199unsigned int cpufreq_quick_get_max(unsigned int cpu)
1200{
1201 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1202 unsigned int ret_freq = 0;
1203
1204 if (policy) {
1205 ret_freq = policy->max;
1206 cpufreq_cpu_put(policy);
1207 }
1208
1209 return ret_freq;
1210}
1211EXPORT_SYMBOL(cpufreq_quick_get_max);
1212
95235ca2 1213
5a01f2e8 1214static unsigned int __cpufreq_get(unsigned int cpu)
1da177e4 1215{
7a6aedfa 1216 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
e08f5f5b 1217 unsigned int ret_freq = 0;
1da177e4 1218
1da177e4 1219 if (!cpufreq_driver->get)
4d34a67d 1220 return ret_freq;
1da177e4 1221
e08f5f5b 1222 ret_freq = cpufreq_driver->get(cpu);
1da177e4 1223
e08f5f5b
GS
1224 if (ret_freq && policy->cur &&
1225 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1226 /* verify no discrepancy between actual and
1227 saved value exists */
1228 if (unlikely(ret_freq != policy->cur)) {
1229 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1da177e4
LT
1230 schedule_work(&policy->update);
1231 }
1232 }
1233
4d34a67d 1234 return ret_freq;
5a01f2e8 1235}
1da177e4 1236
5a01f2e8
VP
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	unsigned int ret_freq = 0;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		goto out;

	if (unlikely(lock_policy_rwsem_read(cpu)))
		goto out_policy;

	ret_freq = __cpufreq_get(cpu);

	unlock_policy_rwsem_read(cpu);

out_policy:
	/* drop the reference taken by cpufreq_cpu_get() */
	cpufreq_cpu_put(policy);
out:
	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
1264
8a25a2fd
KS
/* hooks cpufreq into the cpu subsystem: add/remove called per CPU device */
static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
1271
1da177e4 1272
/**
 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
 *
 * This function is only executed for the boot processor.  The other CPUs
 * have been put offline by means of CPU hotplug.
 *
 * Returns 0 on success or the driver's ->suspend() error code.
 */
static int cpufreq_bp_suspend(void)
{
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *cpu_policy;

	pr_debug("suspending cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return 0;

	if (cpufreq_driver->suspend) {
		ret = cpufreq_driver->suspend(cpu_policy);
		if (ret)
			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
					"step on CPU %u\n", cpu_policy->cpu);
	}

	cpufreq_cpu_put(cpu_policy);
	return ret;
}
1303
/**
 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
 *
 *	1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 *	2.) schedule call cpufreq_update_policy() ASAP as interrupts are
 *	    restored. It will verify that the current freq is in sync with
 *	    what we believe it to be. This is a bit later than when it
 *	    should be, but nonethteless it's better than calling
 *	    cpufreq_driver->get() here which might re-enable interrupts...
 *
 * This function is only executed for the boot CPU.  The other CPUs have not
 * been turned on yet.
 */
static void cpufreq_bp_resume(void)
{
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *cpu_policy;

	pr_debug("resuming cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return;

	if (cpufreq_driver->resume) {
		ret = cpufreq_driver->resume(cpu_policy);
		if (ret) {
			printk(KERN_ERR "cpufreq: resume failed in ->resume "
					"step on CPU %u\n", cpu_policy->cpu);
			goto fail;
		}
	}

	/* defer the freq re-sync to process context (see comment above) */
	schedule_work(&cpu_policy->update);

fail:
	cpufreq_cpu_put(cpu_policy);
}
1345
e00e56df
RW
/* syscore ops run with only the boot CPU online, late in suspend/resume */
static struct syscore_ops cpufreq_syscore_ops = {
	.suspend	= cpufreq_bp_suspend,
	.resume		= cpufreq_bp_resume,
};
1350
9d95046e
BP
1351/**
1352 * cpufreq_get_current_driver - return current driver's name
1353 *
1354 * Return the name string of the currently loaded cpufreq driver
1355 * or NULL, if none.
1356 */
1357const char *cpufreq_get_current_driver(void)
1358{
1359 if (cpufreq_driver)
1360 return cpufreq_driver->name;
1361
1362 return NULL;
1363}
1364EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1da177e4
LT
1365
/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 *	cpufreq_register_notifier - register a driver with cpufreq
 *	@nb: notifier function to register
 *	@list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 *	Add a driver to one of two lists: either a list of drivers that
 *      are notified about clock rate changes (once before and once after
 *      the transition), or a list of drivers that are notified about
 *      changes in cpufreq policy.
 *
 *	This function may sleep, and has the same return conditions as
 *	blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	/* transition list must have been initialized by core_initcall */
	WARN_ON(!init_cpufreq_transition_notifier_list_called);

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		/* SRCU chain: transition notifications may come from
		 * contexts where blocking chains are unsuitable */
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
1408
1409
/**
 *	cpufreq_unregister_notifier - unregister a driver with cpufreq
 *	@nb: notifier block to be unregistered
 *	@list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 *	Remove a driver from the CPU frequency notifier list.
 *
 *	This function may sleep, and has the same return conditions as
 *	blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);
1443
1444
/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/


/*
 * __cpufreq_driver_target - clamp @target_freq to policy limits and ask
 * the driver to switch.  Caller must hold the policy rwsem in write mode.
 */
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	int retval = -EINVAL;
	unsigned int old_target_freq = target_freq;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	if (target_freq > policy->max)
		target_freq = policy->max;
	if (target_freq < policy->min)
		target_freq = policy->min;

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
			policy->cpu, target_freq, relation, old_target_freq);

	/* already there - nothing to do */
	if (target_freq == policy->cur)
		return 0;

	if (cpu_online(policy->cpu) && cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);

	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1478
1da177e4
LT
/*
 * cpufreq_driver_target - locked wrapper around __cpufreq_driver_target
 *
 * Takes a reference on the policy and the per-CPU write rwsem before
 * delegating; releases both on all paths.
 */
int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret = -EINVAL;

	/* pin the policy so it cannot go away underneath us */
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		goto no_policy;

	if (unlikely(lock_policy_rwsem_write(policy->cpu)))
		goto fail;

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	unlock_policy_rwsem_write(policy->cpu);

fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1502
bf0b90e3 1503int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
dfde5d62
VP
1504{
1505 int ret = 0;
1506
d5aaffa9
DB
1507 if (cpufreq_disabled())
1508 return ret;
1509
0676f7f2
VK
1510 if (!(cpu_online(cpu) && cpufreq_driver->getavg))
1511 return 0;
1512
dfde5d62
VP
1513 policy = cpufreq_cpu_get(policy->cpu);
1514 if (!policy)
1515 return -EINVAL;
1516
0676f7f2 1517 ret = cpufreq_driver->getavg(policy, cpu);
dfde5d62 1518
dfde5d62
VP
1519 cpufreq_cpu_put(policy);
1520 return ret;
1521}
5a01f2e8 1522EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
dfde5d62 1523
/*
 * __cpufreq_governor - deliver @event (START/STOP/LIMITS) to the policy's
 * governor, falling back to the performance governor when the current one
 * cannot cope with the hardware's transition latency.
 *
 * when "event" is CPUFREQ_GOV_LIMITS
 */

static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	/* Only must be defined when default governor is known to have latency
	   restrictions, like e.g. conservative or ondemand.
	   That this is the case is already ensured in Kconfig
	*/
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			printk(KERN_WARNING "%s governor failed, too long"
			       " transition latency of HW, fallback"
			       " to %s governor\n",
			       policy->governor->name,
			       gov->name);
			policy->governor = gov;
		}
	}

	/* hold the governor module while it is in use */
	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
						policy->cpu, event);
	ret = policy->governor->governor(policy, event);

	if (!policy->governor->initialized && (event == CPUFREQ_GOV_START))
		policy->governor->initialized = 1;

	/* we keep one module reference alive for
			each CPU governed by this CPU */
	if ((event != CPUFREQ_GOV_START) || ret)
		module_put(policy->governor->owner);
	if ((event == CPUFREQ_GOV_STOP) && !ret)
		module_put(policy->governor->owner);

	return ret;
}
1577
1578
1da177e4
LT
/*
 * cpufreq_register_governor - add @governor to the global governor list
 *
 * Returns 0 on success, -EBUSY if a governor of the same name is already
 * registered, -EINVAL/-ENODEV for a NULL governor or disabled cpufreq.
 */
int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	mutex_lock(&cpufreq_governor_mutex);

	governor->initialized = 0;
	err = -EBUSY;
	/* name must be unique in the list */
	if (__find_governor(governor->name) == NULL) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1602
1603
/*
 * cpufreq_unregister_governor - remove @governor from the global list
 *
 * Also clears any stashed per-CPU "last governor" names on offline CPUs
 * so a returning CPU does not try to restore a governor that is gone.
 */
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
#ifdef CONFIG_HOTPLUG_CPU
	int cpu;
#endif

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

#ifdef CONFIG_HOTPLUG_CPU
	for_each_present_cpu(cpu) {
		if (cpu_online(cpu))
			continue;
		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
	}
#endif

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
	return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1631
1632
1633
/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;
	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	/* snapshot into the caller's buffer; caller gets no reference */
	memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
1661
1662
/*
 * __cpufreq_set_policy - apply @policy to the live policy @data
 *
 * data   : current policy.
 * policy : policy to be set.
 *
 * Validates the requested limits with the driver, runs the ADJUST /
 * INCOMPATIBLE / NOTIFY policy-notifier passes, then either calls the
 * driver's ->setpolicy or performs a governor stop/start switch with
 * rollback to the old governor on failure.
 * Caller must hold the policy rwsem in write mode.
 */
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy)
{
	int ret = 0;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
		policy->min, policy->max);

	memcpy(&policy->cpuinfo, &data->cpuinfo,
				sizeof(struct cpufreq_cpuinfo));

	/* requested range must overlap the currently allowed range */
	if (policy->min > data->max || policy->max < data->min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, policy);

	/* adjust if necessary - hardware incompatibility*/
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, policy);

	/* verify the cpu speed can be set within this limit,
	   which might be different to the first one */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, policy);

	data->min = policy->min;
	data->max = policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
					data->min, data->max);

	if (cpufreq_driver->setpolicy) {
		data->policy = policy->policy;
		pr_debug("setting range\n");
		ret = cpufreq_driver->setpolicy(policy);
	} else {
		if (policy->governor != data->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = data->governor;

			pr_debug("governor switch\n");

			/* end old governor */
			if (data->governor)
				__cpufreq_governor(data, CPUFREQ_GOV_STOP);

			/* start new governor */
			data->governor = policy->governor;
			if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
				/* new governor failed, so re-start old one */
				pr_debug("starting governor %s failed\n",
							data->governor->name);
				if (old_gov) {
					data->governor = old_gov;
					__cpufreq_governor(data,
							   CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		pr_debug("governor: change or update limits\n");
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

error_out:
	return ret;
}
1750
1da177e4
LT
/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
	struct cpufreq_policy policy;
	int ret;

	if (!data) {
		ret = -ENODEV;
		goto no_policy;
	}

	if (unlikely(lock_policy_rwsem_write(cpu))) {
		ret = -EINVAL;
		goto fail;
	}

	pr_debug("updating policy for CPU %u\n", cpu);
	/* re-apply from the user-requested values, not the clipped ones */
	memcpy(&policy, data, sizeof(struct cpufreq_policy));
	policy.min = data->user_policy.min;
	policy.max = data->user_policy.max;
	policy.policy = data->user_policy.policy;
	policy.governor = data->user_policy.governor;

	/* BIOS might change freq behind our back
	  -> ask driver for current freq and notify governors about a change */
	if (cpufreq_driver->get) {
		policy.cur = cpufreq_driver->get(cpu);
		if (!data->cur) {
			pr_debug("Driver did not initialize current freq");
			data->cur = policy.cur;
		} else {
			if (data->cur != policy.cur)
				cpufreq_out_of_sync(cpu, data->cur,
								policy.cur);
		}
	}

	ret = __cpufreq_set_policy(data, &policy);

	unlock_policy_rwsem_write(cpu);

fail:
	cpufreq_cpu_put(data);
no_policy:
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
1805
dd184a01 1806static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
c32b6b8e
AR
1807 unsigned long action, void *hcpu)
1808{
1809 unsigned int cpu = (unsigned long)hcpu;
8a25a2fd 1810 struct device *dev;
c32b6b8e 1811
8a25a2fd
KS
1812 dev = get_cpu_device(cpu);
1813 if (dev) {
c32b6b8e
AR
1814 switch (action) {
1815 case CPU_ONLINE:
8bb78442 1816 case CPU_ONLINE_FROZEN:
8a25a2fd 1817 cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
1818 break;
1819 case CPU_DOWN_PREPARE:
8bb78442 1820 case CPU_DOWN_PREPARE_FROZEN:
5a01f2e8
VP
1821 if (unlikely(lock_policy_rwsem_write(cpu)))
1822 BUG();
1823
8a25a2fd 1824 __cpufreq_remove_dev(dev, NULL);
c32b6b8e 1825 break;
5a01f2e8 1826 case CPU_DOWN_FAILED:
8bb78442 1827 case CPU_DOWN_FAILED_FROZEN:
8a25a2fd 1828 cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
1829 break;
1830 }
1831 }
1832 return NOTIFY_OK;
1833}
1834
9c36f746 1835static struct notifier_block __refdata cpufreq_cpu_notifier = {
c32b6b8e
AR
1836 .notifier_call = cpufreq_cpu_callback,
1837};
1da177e4
LT
1838
/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values#
 * submitted by the CPU Frequency driver.
 *
 *   Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	/* a driver must provide ->verify, ->init and one of
	 * ->setpolicy / ->target */
	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	/* only one driver may be registered at a time */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EBUSY;
	}
	cpufreq_driver = driver_data;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* triggers cpufreq_add_dev() for every online CPU */
	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_null_driver;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			pr_debug("no CPU initialized for driver %s\n",
							driver_data->name);
			goto err_if_unreg;
		}
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_null_driver:
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
1914
1915
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 *    Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	/* tears down the per-CPU interfaces before dropping the driver */
	subsys_interface_unregister(&cpufreq_interface);
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8
VP
1943
/*
 * cpufreq_core_init - early core setup
 *
 * Initializes the per-CPU owner map and rwsems, creates the global
 * /sys/devices/system/cpu/cpufreq kobject and registers the syscore
 * suspend/resume ops.  Runs at core_initcall time, before any driver
 * can register.
 */
static int __init cpufreq_core_init(void)
{
	int cpu;

	if (cpufreq_disabled())
		return -ENODEV;

	for_each_possible_cpu(cpu) {
		/* -1 == no policy owns this CPU yet */
		per_cpu(cpufreq_policy_cpu, cpu) = -1;
		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
	}

	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
	BUG_ON(!cpufreq_global_kobject);
	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);
This page took 0.6815 seconds and 5 git commands to generate.