/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>
/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
static DEFINE_RWLOCK(cpufreq_driver_lock);
static DEFINE_MUTEX(cpufreq_governor_lock);
static LIST_HEAD(cpufreq_policy_list);
#ifdef CONFIG_HOTPLUG_CPU
/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
#endif
/*
 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
 * all cpufreq/hotplug/workqueue/etc related lock issues.
 *
 * The rules for this semaphore:
 * - Any routine that wants to read from the policy structure will
 *   do a down_read on this semaphore.
 * - Any routine that will write to the policy structure and/or may take away
 *   the policy altogether (eg. CPU hotplug), will hold this lock in write
 *   mode before doing so.
 *
 * Additional rules:
 * - Governor routines that can be called in cpufreq hotplug path should not
 *   take this sem as top level hotplug notifier handler takes this.
 * - Lock should not be held across
 *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
 */
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
#define lock_policy_rwsem(mode, cpu)					\
static int lock_policy_rwsem_##mode(int cpu)				\
{									\
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
	BUG_ON(!policy);						\
	down_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu));		\
									\
	return 0;							\
}

lock_policy_rwsem(read, cpu);
lock_policy_rwsem(write, cpu);

#define unlock_policy_rwsem(mode, cpu)					\
static void unlock_policy_rwsem_##mode(int cpu)				\
{									\
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
	BUG_ON(!policy);						\
	up_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu));		\
}

unlock_policy_rwsem(read, cpu);
unlock_policy_rwsem(write, cpu);
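
/*
 * Illustrative sketch (added for this edit, not part of the original file):
 * the macros above generate lock_policy_rwsem_read()/_write() and their
 * unlock counterparts, which callers pair around policy accesses, e.g.:
 *
 *	if (lock_policy_rwsem_read(cpu) < 0)
 *		return -EINVAL;
 *	... read policy fields under the per-CPU semaphore ...
 *	unlock_policy_rwsem_read(cpu);
 */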
/*
 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
 * sections
 */
static DECLARE_RWSEM(cpufreq_rwsem);
/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);
/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);
static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);
bool have_governor_per_policy(void)
{
	return cpufreq_driver->have_governor_per_policy;
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;
	else
		return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = cputime_to_usecs(cur_wall_time);

	return cputime_to_usecs(idle_time);
}
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		/* !NO_HZ or cpu offline so we can rely on cpustat.idle */
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
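
/*
 * Illustrative sketch (added for this edit, not part of the original file):
 * governors such as ondemand sample this interface twice and derive a load
 * estimate from the wall/idle deltas; all names below are hypothetical:
 *
 *	u64 wall_prev, wall_now, idle_prev, idle_now;
 *	unsigned int load;
 *
 *	idle_prev = get_cpu_idle_time(cpu, &wall_prev, 0);
 *	msleep(100);					(one sampling period)
 *	idle_now = get_cpu_idle_time(cpu, &wall_now, 0);
 *	load = div64_u64(100 * ((wall_now - wall_prev) -
 *			(idle_now - idle_prev)), wall_now - wall_prev);
 */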
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
		return NULL;

	if (!down_read_trylock(&cpufreq_rwsem))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = per_cpu(cpufreq_cpu_data, cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy)
		up_read(&cpufreq_rwsem);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	if (cpufreq_disabled())
		return;

	kobject_put(&policy->kobj);
	up_read(&cpufreq_rwsem);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
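
/*
 * Illustrative sketch (added for this edit, not part of the original file):
 * every successful cpufreq_cpu_get() must be balanced by cpufreq_cpu_put(),
 * since the pair holds both a kobject reference and cpufreq_rwsem for read:
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		pr_info("cpu%u last known freq: %u kHz\n", cpu, policy->cur);
 *		cpufreq_cpu_put(policy);
 *	}
 */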
/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/
/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; "
			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu "
			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
#else
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
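
/*
 * Worked example (added for illustration, not part of the original file):
 * cpufreq_scale() rescales the reference loops_per_jiffy linearly with
 * frequency. With l_p_j_ref = 2000000 calibrated at l_p_j_ref_freq =
 * 1000000 kHz, a transition to ci->new = 500000 kHz yields
 *
 *	loops_per_jiffy = 2000000 * 500000 / 1000000 = 1000000,
 *
 * i.e. half the calibrated delay loops at half the clock rate.
 */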
static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		state, freqs->new);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is"
					" %u, cpufreq assumed %u kHz.\n",
					freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
			(unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
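
/*
 * Illustrative sketch (added for this edit, not part of the original file):
 * a scaling driver's ->target() brackets the actual hardware change with the
 * two notifications; my_hw_write_freq() is a hypothetical driver routine:
 *
 *	struct cpufreq_freqs freqs = {
 *		.old = policy->cur,
 *		.new = target_khz,
 *	};
 *
 *	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 *	my_hw_write_freq(target_khz);
 *	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 */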
/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static struct cpufreq_governor *__find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	list_for_each_entry(t, &cpufreq_governor_list, governor_list)
		if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}
/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (cpufreq_driver->target) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);

		if (t == NULL) {
			int ret;

			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = __find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}
/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}
show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
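
/*
 * For illustration (added for this edit, not part of the original file):
 * after preprocessing, show_one(scaling_cur_freq, cur) above is
 * equivalent to
 *
 *	static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy,
 *					     char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->cur);
 *	}
 */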
static int __cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy);
/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)			\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	int ret;							\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = __cpufreq_set_policy(policy, &new_policy);		\
	policy->user_policy.object = policy->object;			\
									\
	return ret ? ret : count;					\
}
store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy->cpu);
	if (!cur_freq)
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}
/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}
/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	int ret;
	char	str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	/*
	 * Do not use cpufreq_set_policy here or the user_policy.max
	 * will be wrongly overridden
	 */
	ret = __cpufreq_set_policy(policy, &new_policy);

	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}
/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!cpufreq_driver->target) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}
ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}
/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}
static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}
static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}
/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;
	if (cpufreq_driver->bios_limit) {
		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
		if (!ret)
			return sprintf(buf, "%u\n", limit);
	}
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);
static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	if (!down_read_trylock(&cpufreq_rwsem))
		goto exit;

	if (lock_policy_rwsem_read(policy->cpu) < 0)
		goto up_read;

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	unlock_policy_rwsem_read(policy->cpu);

up_read:
	up_read(&cpufreq_rwsem);
exit:
	return ret;
}
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	get_online_cpus();

	if (!cpu_online(policy->cpu))
		goto unlock;

	if (!down_read_trylock(&cpufreq_rwsem))
		goto unlock;

	if (lock_policy_rwsem_write(policy->cpu) < 0)
		goto up_read;

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	unlock_policy_rwsem_write(policy->cpu);

up_read:
	up_read(&cpufreq_rwsem);
unlock:
	put_online_cpus();

	return ret;
}
static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}
static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);
static int cpufreq_global_kobject_usage;

int cpufreq_get_global_kobject(void)
{
	if (!cpufreq_global_kobject_usage++)
		return kobject_add(cpufreq_global_kobject,
				&cpu_subsys.dev_root->kobj, "%s", "cpufreq");

	return 0;
}
EXPORT_SYMBOL(cpufreq_get_global_kobject);
void cpufreq_put_global_kobject(void)
{
	if (!--cpufreq_global_kobject_usage)
		kobject_del(cpufreq_global_kobject);
}
EXPORT_SYMBOL(cpufreq_put_global_kobject);
int cpufreq_sysfs_create_file(const struct attribute *attr)
{
	int ret = cpufreq_get_global_kobject();

	if (!ret) {
		ret = sysfs_create_file(cpufreq_global_kobject, attr);
		if (ret)
			cpufreq_put_global_kobject();
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_sysfs_create_file);
void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
	sysfs_remove_file(cpufreq_global_kobject, attr);
	cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
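
/*
 * Illustrative sketch (added for this edit, not part of the original file):
 * clients pair these helpers to publish a file under the global "cpufreq"
 * kobject. The attribute and its show routine below are hypothetical, and
 * assume kobj_attribute-style wiring matching the dynamic kobject's
 * sysfs_ops:
 *
 *	static struct kobj_attribute my_knob =
 *		__ATTR(my_knob, 0444, my_knob_show, NULL);
 *
 *	ret = cpufreq_sysfs_create_file(&my_knob.attr);
 *	...
 *	cpufreq_sysfs_remove_file(&my_knob.attr);
 */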
/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct device *cpu_dev;

		if (j == policy->cpu)
			continue;

		pr_debug("Adding link for CPU: %u\n", j);
		cpu_dev = get_cpu_device(j);
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret)
			break;
	}
	return ret;
}
static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* prepare interface data */
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   &dev->kobj, "cpufreq");
	if (ret)
		return ret;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_kobj_put;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->target) {
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			goto err_out_kobj_put;
	}

	ret = cpufreq_add_dev_symlink(policy);
	if (ret)
		goto err_out_kobj_put;

	return ret;

err_out_kobj_put:
	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);
	return ret;
}
static void cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_policy new_policy;
	int ret = 0;

	memcpy(&new_policy, policy, sizeof(*policy));
	/* assure that the starting sequence is run in __cpufreq_set_policy */
	policy->governor = NULL;

	/* set default policy */
	ret = __cpufreq_set_policy(policy, &new_policy);
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret) {
		pr_debug("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
}
#ifdef CONFIG_HOTPLUG_CPU
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
				  unsigned int cpu, struct device *dev,
				  bool frozen)
{
	int ret = 0, has_target = !!cpufreq_driver->target;
	unsigned long flags;

	if (has_target) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

	lock_policy_rwsem_write(policy->cpu);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	unlock_policy_rwsem_write(policy->cpu);

	if (has_target) {
		if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
			(ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}
	}

	/* Don't touch sysfs links during light-weight init */
	if (frozen)
		return ret;

	ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");

	return ret;
}
#endif
static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	read_lock_irqsave(&cpufreq_driver_lock, flags);

	policy = per_cpu(cpufreq_cpu_data_fallback, cpu);

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return policy;
}
static struct cpufreq_policy *cpufreq_policy_alloc(void)
{
	struct cpufreq_policy *policy;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	INIT_LIST_HEAD(&policy->policy_list);
	return policy;

err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}
static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}
static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	if (cpu == policy->cpu)
		return;

	/*
	 * Take direct locks as lock_policy_rwsem_write wouldn't work here.
	 * Also lock for last cpu is enough here as contention will happen only
	 * after policy->cpu is changed and after it is changed, other threads
	 * will try to acquire lock for new cpu. And policy is already updated
	 * by then.
	 */
	down_write(&per_cpu(cpu_policy_rwsem, policy->cpu));

	policy->last_cpu = policy->cpu;
	policy->cpu = cpu;

	up_write(&per_cpu(cpu_policy_rwsem, policy->last_cpu));

#ifdef CONFIG_CPU_FREQ_TABLE
	cpufreq_frequency_table_update_policy_cpu(policy);
#endif
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_UPDATE_POLICY_CPU, policy);
}
static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
			     bool frozen)
{
	unsigned int j, cpu = dev->id;
	int ret = -ENOMEM;
	struct cpufreq_policy *policy;
	unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
	struct cpufreq_policy *tpolicy;
	struct cpufreq_governor *gov;
#endif

	if (cpu_is_offline(cpu))
		return 0;

	pr_debug("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);
		return 0;
	}
#endif

	if (!down_read_trylock(&cpufreq_rwsem))
		return 0;

#ifdef CONFIG_HOTPLUG_CPU
	/* Check if this cpu was hot-unplugged earlier and has siblings */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
		if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
			ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev, frozen);
			up_read(&cpufreq_rwsem);
			return ret;
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif

	if (frozen)
		/* Restore the saved policy when doing light-weight init */
		policy = cpufreq_policy_restore(cpu);
	else
		policy = cpufreq_policy_alloc();

	if (!policy)
		goto nomem_out;

	/*
	 * In the resume path, since we restore a saved policy, the assignment
	 * to policy->cpu is like an update of the existing policy, rather than
	 * the creation of a brand new one. So we need to perform this update
	 * by invoking update_policy_cpu().
	 */
	if (frozen && cpu != policy->cpu)
		update_policy_cpu(policy, cpu);
	else
		policy->cpu = cpu;

	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
	cpumask_copy(policy->cpus, cpumask_of(cpu));

	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto err_set_policy_cpu;
	}

	/* related cpus should atleast have policy->cpus */
	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

	/*
	 * affected cpus must always be the one, which are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	policy->user_policy.min = policy->min;
	policy->user_policy.max = policy->max;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

#ifdef CONFIG_HOTPLUG_CPU
	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
	if (gov) {
		policy->governor = gov;
		pr_debug("Restoring governor %s for cpu %d\n",
		       policy->governor->name, cpu);
	}
#endif

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!frozen) {
		ret = cpufreq_add_dev_interface(policy, dev);
		if (ret)
			goto err_out_unregister;
	}

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_add(&policy->policy_list, &cpufreq_policy_list);
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_init_policy(policy);

	kobject_uevent(&policy->kobj, KOBJ_ADD);
	up_read(&cpufreq_rwsem);

	pr_debug("initialization complete\n");

	return 0;

err_out_unregister:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

err_set_policy_cpu:
	cpufreq_policy_free(policy);
nomem_out:
	up_read(&cpufreq_rwsem);

	return ret;
}
/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration concurrently
 * with cpu hotplugging and all hell will break loose. Tried to clean this
 * mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	return __cpufreq_add_dev(dev, sif, false);
}
static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
					   unsigned int old_cpu, bool frozen)
{
	struct device *cpu_dev;
	int ret;

	/* first sibling now owns the new sysfs dir */
	cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));

	/* Don't touch sysfs files during light-weight tear-down */
	if (frozen)
		return cpu_dev->id;

	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
	ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
	if (ret) {
		pr_err("%s: Failed to move kobj: %d", __func__, ret);

		WARN_ON(lock_policy_rwsem_write(old_cpu));
		cpumask_set_cpu(old_cpu, policy->cpus);
		unlock_policy_rwsem_write(old_cpu);

		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");

		return -EINVAL;
	}

	return cpu_dev->id;
}
static int __cpufreq_remove_dev_prepare(struct device *dev,
					struct subsys_interface *sif,
					bool frozen)
{
	unsigned int cpu = dev->id, cpus;
	int new_cpu, ret;
	unsigned long flags;
	struct cpufreq_policy *policy;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	policy = per_cpu(cpufreq_cpu_data, cpu);

	/* Save the policy somewhere when doing a light-weight tear-down */
	if (frozen)
		per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	if (cpufreq_driver->target) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

#ifdef CONFIG_HOTPLUG_CPU
	if (!cpufreq_driver->setpolicy)
		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
			policy->governor->name, CPUFREQ_NAME_LEN);
#endif

	lock_policy_rwsem_read(cpu);
	cpus = cpumask_weight(policy->cpus);
	unlock_policy_rwsem_read(cpu);

	if (cpu != policy->cpu) {
		if (!frozen)
			sysfs_remove_link(&dev->kobj, "cpufreq");
	} else if (cpus > 1) {
		new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
		if (new_cpu >= 0) {
			update_policy_cpu(policy, new_cpu);

			if (!frozen) {
				pr_debug("%s: policy Kobject moved to cpu: %d "
					 "from: %d\n", __func__, new_cpu, cpu);
			}
		}
	}

	return 0;
}
static int __cpufreq_remove_dev_finish(struct device *dev,
				       struct subsys_interface *sif,
				       bool frozen)
{
	unsigned int cpu = dev->id, cpus;
	int ret;
	unsigned long flags;
	struct cpufreq_policy *policy;
	struct kobject *kobj;
	struct completion *cmp;

	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	WARN_ON(lock_policy_rwsem_write(cpu));
	cpus = cpumask_weight(policy->cpus);

	if (cpus > 1)
		cpumask_clear_cpu(cpu, policy->cpus);
	unlock_policy_rwsem_write(cpu);

	/* If cpu is last user of policy, free policy */
	if (cpus == 1) {
		if (cpufreq_driver->target) {
			ret = __cpufreq_governor(policy,
					CPUFREQ_GOV_POLICY_EXIT);
			if (ret) {
				pr_err("%s: Failed to exit governor\n",
				       __func__);
				return ret;
			}
		}

		if (!frozen) {
			lock_policy_rwsem_read(cpu);
			kobj = &policy->kobj;
			cmp = &policy->kobj_unregister;
			unlock_policy_rwsem_read(cpu);
			kobject_put(kobj);

			/*
			 * We need to make sure that the underlying kobj is
			 * actually not referenced anymore by anybody before we
			 * proceed with unloading.
			 */
			pr_debug("waiting for dropping of refcount\n");
			wait_for_completion(cmp);
			pr_debug("wait complete\n");
		}

		/*
		 * Perform the ->exit() even during light-weight tear-down,
		 * since this is a core component, and is essential for the
		 * subsequent light-weight ->init() to succeed.
		 */
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);

		/* Remove policy from list of active policies */
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_del(&policy->policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		if (!frozen)
			cpufreq_policy_free(policy);
	} else {
		if (cpufreq_driver->target) {
			if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
				(ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
				pr_err("%s: Failed to start governor\n",
				       __func__);
				return ret;
			}
		}
	}

	per_cpu(cpufreq_cpu_data, cpu) = NULL;
	return 0;
}
/**
 * __cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 * Caller should already have policy_rwsem in write mode for this CPU.
 * This routine frees the rwsem before returning.
 */
static inline int __cpufreq_remove_dev(struct device *dev,
				       struct subsys_interface *sif,
				       bool frozen)
{
	int ret;

	ret = __cpufreq_remove_dev_prepare(dev, sif, frozen);

	if (!ret)
		ret = __cpufreq_remove_dev_finish(dev, sif, frozen);

	return ret;
}
static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	int retval;

	if (cpu_is_offline(cpu))
		return 0;

	retval = __cpufreq_remove_dev(dev, sif, false);
	return retval;
}
static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);
	unsigned int cpu = policy->cpu;
	pr_debug("handle_update for cpu %u called\n", cpu);
	cpufreq_update_policy(cpu);
}
/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
 *	in deep trouble.
 * @cpu: cpu number
 * @old_freq: CPU frequency the kernel thinks the CPU runs at
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to current frequency first, and need to clean up later.
 * So either call to cpufreq_update_policy() or schedule handle_update().
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
				unsigned int new_freq)
{
	struct cpufreq_policy *policy;
	struct cpufreq_freqs freqs;
	unsigned long flags;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
		 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);

	freqs.old = old_freq;
	freqs.new = new_freq;

	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
}
/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned int ret_freq = 0;

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
		return cpufreq_driver->get(cpu);

	policy = cpufreq_cpu_get(cpu);
	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);
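
/*
 * Illustrative sketch (added for this edit, not part of the original file):
 * thermal or power code that only needs the cached value can avoid a
 * driver round-trip:
 *
 *	unsigned int khz = cpufreq_quick_get(cpu);
 *
 *	if (khz)
 *		pr_info("cpu%u last known freq: %u kHz\n", cpu, khz);
 */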
/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->max;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);
static unsigned int __cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(cpu);

	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
					saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	unsigned int ret_freq = 0;

	if (cpufreq_disabled() || !cpufreq_driver)
		return -ENOENT;

	if (!down_read_trylock(&cpufreq_rwsem))
		return 0;

	if (unlikely(lock_policy_rwsem_read(cpu)))
		goto out_policy;

	ret_freq = __cpufreq_get(cpu);

	unlock_policy_rwsem_read(cpu);

out_policy:
	up_read(&cpufreq_rwsem);

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
/**
 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
 *
 * This function is only executed for the boot processor.  The other CPUs
 * have been put offline by means of CPU hotplug.
 */
static int cpufreq_bp_suspend(void)
{
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *policy;

	pr_debug("suspending cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	policy = cpufreq_cpu_get(cpu);
	if (!policy)
		return 0;

	if (cpufreq_driver->suspend) {
		ret = cpufreq_driver->suspend(policy);
		if (ret)
			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
					"step on CPU %u\n", policy->cpu);
	}

	cpufreq_cpu_put(policy);
	return ret;
}
/**
 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
 *
 *	1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 *	2.) schedule call cpufreq_update_policy() ASAP as interrupts are
 *	    restored. It will verify that the current freq is in sync with
 *	    what we believe it to be. This is a bit later than when it
 *	    should be, but nonetheless it's better than calling
 *	    cpufreq_driver->get() here which might re-enable interrupts...
 *
 * This function is only executed for the boot CPU.  The other CPUs have not
 * been turned on yet.
 */
static void cpufreq_bp_resume(void)
{
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *policy;

	pr_debug("resuming cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	policy = cpufreq_cpu_get(cpu);
	if (!policy)
		return;

	if (cpufreq_driver->resume) {
		ret = cpufreq_driver->resume(policy);
		if (ret) {
			printk(KERN_ERR "cpufreq: resume failed in ->resume "
					"step on CPU %u\n", policy->cpu);
			goto fail;
		}
	}

	schedule_work(&policy->update);

fail:
	cpufreq_cpu_put(policy);
}
static struct syscore_ops cpufreq_syscore_ops = {
	.suspend	= cpufreq_bp_suspend,
	.resume		= cpufreq_bp_resume,
};
/**
 * cpufreq_get_current_driver - return current driver's name
 *
 * Return the name string of the currently loaded cpufreq driver
 * or NULL, if none.
 */
const char *cpufreq_get_current_driver(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->name;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/
/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	WARN_ON(!init_cpufreq_transition_notifier_list_called);

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
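
/*
 * Illustrative sketch (added for this edit, not part of the original file):
 * a minimal transition notifier as a client might register one; the
 * callback and notifier block names are hypothetical:
 *
 *	static int my_cpufreq_cb(struct notifier_block *nb,
 *				 unsigned long state, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (state == CPUFREQ_POSTCHANGE)
 *			pr_debug("cpu%u: %u -> %u kHz\n", freqs->cpu,
 *				 freqs->old, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_cpufreq_cb,
 *	};
 *
 *	cpufreq_register_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */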
/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);
/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	int retval = -EINVAL;
	unsigned int old_target_freq = target_freq;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	if (target_freq > policy->max)
		target_freq = policy->max;
	if (target_freq < policy->min)
		target_freq = policy->min;

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
			policy->cpu, target_freq, relation, old_target_freq);

	if (target_freq == policy->cur)
		return 0;

	if (cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);

	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret = -EINVAL;

	if (unlikely(lock_policy_rwsem_write(policy->cpu)))
		goto fail;

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	unlock_policy_rwsem_write(policy->cpu);

fail:
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
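
/*
 * Illustrative sketch (added for this edit, not part of the original file):
 * a governor asking for the lowest frequency at or above a computed target
 * uses the CPUFREQ_RELATION_L rounding rule:
 *
 *	ret = cpufreq_driver_target(policy, target_khz, CPUFREQ_RELATION_L);
 *
 * CPUFREQ_RELATION_H instead selects the highest frequency at or below the
 * request; out-of-range values are clamped to policy->min/max first, as
 * __cpufreq_driver_target() above shows.
 */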
/*
 * when "event" is CPUFREQ_GOV_LIMITS
 */

static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	/* Only must be defined when default governor is known to have latency
	   restrictions, like e.g. conservative or ondemand.
	   That this is the case is already ensured in Kconfig
	*/
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			printk(KERN_WARNING "%s governor failed, too long"
			       " transition latency of HW, fallback"
			       " to %s governor\n",
			       policy->governor->name,
			       gov->name);
			policy->governor = gov;
		}
	}

	if (event == CPUFREQ_GOV_POLICY_INIT)
		if (!try_module_get(policy->governor->owner))
			return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
						policy->cpu, event);

	mutex_lock(&cpufreq_governor_lock);
	if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
	    || (!policy->governor_enabled
	    && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
		mutex_unlock(&cpufreq_governor_lock);
		return -EBUSY;
	}

	if (event == CPUFREQ_GOV_STOP)
		policy->governor_enabled = false;
	else if (event == CPUFREQ_GOV_START)
		policy->governor_enabled = true;

	mutex_unlock(&cpufreq_governor_lock);

	ret = policy->governor->governor(policy, event);

	if (!ret) {
		if (event == CPUFREQ_GOV_POLICY_INIT)
			policy->governor->initialized++;
		else if (event == CPUFREQ_GOV_POLICY_EXIT)
			policy->governor->initialized--;
	} else {
		/* Restore original values */
		mutex_lock(&cpufreq_governor_lock);
		if (event == CPUFREQ_GOV_STOP)
			policy->governor_enabled = true;
		else if (event == CPUFREQ_GOV_START)
			policy->governor_enabled = false;
		mutex_unlock(&cpufreq_governor_lock);
	}

	if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
			((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
		module_put(policy->governor->owner);

	return ret;
}
int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	mutex_lock(&cpufreq_governor_mutex);

	governor->initialized = 0;
	err = -EBUSY;
	if (__find_governor(governor->name) == NULL) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
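
/*
 * Illustrative sketch (added for this edit, not part of the original file):
 * the skeleton of a governor module registering itself. All my_* names are
 * hypothetical; the ->governor() callback must handle the CPUFREQ_GOV_*
 * events dispatched by __cpufreq_governor() above:
 *
 *	static int my_governor_fn(struct cpufreq_policy *policy,
 *				  unsigned int event)
 *	{
 *		switch (event) {
 *		case CPUFREQ_GOV_START:
 *		case CPUFREQ_GOV_LIMITS:
 *			return __cpufreq_driver_target(policy, policy->max,
 *						       CPUFREQ_RELATION_H);
 *		}
 *		return 0;
 *	}
 *
 *	static struct cpufreq_governor my_gov = {
 *		.name		= "mygov",
 *		.governor	= my_governor_fn,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	cpufreq_register_governor(&my_gov);
 */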
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
#ifdef CONFIG_HOTPLUG_CPU
	int cpu;
#endif

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

#ifdef CONFIG_HOTPLUG_CPU
	for_each_present_cpu(cpu) {
		if (cpu_online(cpu))
			continue;
		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
	}
#endif

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
	return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/
/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 * @cpu: CPU to find the policy for
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;
	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(*policy));

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
/*
 * data   : current policy.
 * policy : policy to be set.
 */
static int __cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy)
{
	int ret = 0, failed = 1;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
		new_policy->min, new_policy->max);

	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

	if (new_policy->min > policy->max || new_policy->max < policy->min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, new_policy);

	/* adjust if necessary - hardware incompatibility*/
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, new_policy);

	/*
	 * verify the cpu speed can be set within this limit, which might be
	 * different to the first one
	 */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, new_policy);

	policy->min = new_policy->min;
	policy->max = new_policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
					policy->min, policy->max);

	if (cpufreq_driver->setpolicy) {
		policy->policy = new_policy->policy;
		pr_debug("setting range\n");
		ret = cpufreq_driver->setpolicy(new_policy);
	} else {
		if (new_policy->governor != policy->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = policy->governor;

			pr_debug("governor switch\n");

			/* end old governor */
			if (policy->governor) {
				__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
				unlock_policy_rwsem_write(new_policy->cpu);
				__cpufreq_governor(policy,
						CPUFREQ_GOV_POLICY_EXIT);
				lock_policy_rwsem_write(new_policy->cpu);
			}

			/* start new governor */
			policy->governor = new_policy->governor;
			if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
				if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
					failed = 0;
				} else {
					unlock_policy_rwsem_write(new_policy->cpu);
					__cpufreq_governor(policy,
							CPUFREQ_GOV_POLICY_EXIT);
					lock_policy_rwsem_write(new_policy->cpu);
				}
			}

			if (failed) {
				/* new governor failed, so re-start old one */
				pr_debug("starting governor %s failed\n",
							policy->governor->name);
				if (old_gov) {
					policy->governor = old_gov;
					__cpufreq_governor(policy,
							CPUFREQ_GOV_POLICY_INIT);
					__cpufreq_governor(policy,
							CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		pr_debug("governor: change or update limits\n");
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
	}

error_out:
	return ret;
}
/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cpufreq_policy new_policy;
	int ret;

	if (!policy) {
		ret = -ENODEV;
		goto no_policy;
	}

	if (unlikely(lock_policy_rwsem_write(cpu))) {
		ret = -EINVAL;
		goto fail;
	}

	pr_debug("updating policy for CPU %u\n", cpu);
	memcpy(&new_policy, policy, sizeof(*policy));
	new_policy.min = policy->user_policy.min;
	new_policy.max = policy->user_policy.max;
	new_policy.policy = policy->user_policy.policy;
	new_policy.governor = policy->user_policy.governor;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get) {
		new_policy.cur = cpufreq_driver->get(cpu);
		if (!policy->cur) {
			pr_debug("Driver did not initialize current freq");
			policy->cur = new_policy.cur;
		} else {
			if (policy->cur != new_policy.cur && cpufreq_driver->target)
				cpufreq_out_of_sync(cpu, policy->cur,
								new_policy.cur);
		}
	}

	ret = __cpufreq_set_policy(policy, &new_policy);

	unlock_policy_rwsem_write(cpu);

fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
static int cpufreq_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;
	bool frozen = false;

	dev = get_cpu_device(cpu);
	if (dev) {
		if (action & CPU_TASKS_FROZEN)
			frozen = true;

		switch (action & ~CPU_TASKS_FROZEN) {
		case CPU_ONLINE:
			__cpufreq_add_dev(dev, NULL, frozen);
			cpufreq_update_policy(cpu);
			break;

		case CPU_DOWN_PREPARE:
			__cpufreq_remove_dev_prepare(dev, NULL, frozen);
			break;

		case CPU_POST_DEAD:
			__cpufreq_remove_dev_finish(dev, NULL, frozen);
			break;

		case CPU_DOWN_FAILED:
			__cpufreq_add_dev(dev, NULL, frozen);
			break;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EEXIST;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_null_driver;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			pr_debug("no CPU initialized for driver %s\n",
							driver_data->name);
			goto err_if_unreg;
		}
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
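
/*
 * Illustrative sketch (added for this edit, not part of the original file):
 * the minimal shape of a ->target()-style driver registration; all my_*
 * names are hypothetical driver code:
 *
 *	static struct cpufreq_driver my_driver = {
 *		.name	= "mydrv",
 *		.init	= my_cpu_init,
 *		.verify	= my_verify,
 *		.target	= my_target,
 *	};
 *
 *	static int __init my_module_init(void)
 *	{
 *		return cpufreq_register_driver(&my_driver);
 *	}
 *
 * Registration fails with -EINVAL unless ->init, ->verify and one of
 * ->setpolicy/->target are provided, as checked above.
 */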
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	subsys_interface_unregister(&cpufreq_interface);
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	down_write(&cpufreq_rwsem);
	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	up_write(&cpufreq_rwsem);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
static int __init cpufreq_core_init(void)
{
	int cpu;

	if (cpufreq_disabled())
		return -ENODEV;

	for_each_possible_cpu(cpu)
		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));

	cpufreq_global_kobject = kobject_create();
	BUG_ON(!cpufreq_global_kobject);
	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);