/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>
static LIST_HEAD(cpufreq_policy_list);

static inline bool policy_is_inactive(struct cpufreq_policy *policy)
{
	return cpumask_empty(policy->cpus);
}

/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)			 \
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
		if ((__active) == !policy_is_inactive(__policy))

#define for_each_active_policy(__policy)		\
	for_each_suitable_policy(__policy, true)
#define for_each_inactive_policy(__policy)		\
	for_each_suitable_policy(__policy, false)

#define for_each_policy(__policy)			\
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)				\
	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

static inline bool has_target(void)
{
	return cpufreq_driver->target_index || cpufreq_driver->target;
}
/* internal prototypes */
static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static int cpufreq_start_governor(struct cpufreq_policy *policy);

static inline int cpufreq_exit_governor(struct cpufreq_policy *policy)
{
	return cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
}

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);
static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;
	else
		return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	return policy && !policy_is_inactive(policy) ?
		policy->freq_table : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = cputime_to_usecs(cur_wall_time);

	return cputime_to_usecs(idle_time);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - validate & show the freq table passed in
 * - set the policy's transition latency
 * - fill policy->cpus with all possible CPUs
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	int ret;

	ret = cpufreq_table_validate_and_show(policy, table);
	if (ret) {
		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
		return ret;
	}

	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the clock and voltage.
	 */
	cpumask_setall(policy->cpus);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
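
/*
 * Illustrative sketch (not part of this file): a minimal single-cluster
 * driver ->init() callback typically just forwards to cpufreq_generic_init().
 * "my_freq_table" and the 300 us (300000 ns) latency below are hypothetical
 * placeholders for this example.
 *
 *	static int my_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		return cpufreq_generic_init(policy, my_freq_table, 300000);
 *	}
 */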
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);

unsigned int cpufreq_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

	if (!policy || IS_ERR(policy->clk)) {
		pr_err("%s: No %s associated to cpu: %d\n",
		       __func__, policy ? "clk" : "policy", cpu);
		return 0;
	}

	return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);
/**
 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
 *
 * @cpu: cpu to find policy for.
 *
 * This returns the policy for 'cpu', or NULL if it doesn't exist.
 * It also increments the kobject reference count to mark the policy busy,
 * so a corresponding call to cpufreq_cpu_put() is required to decrement it
 * back.  If that cpufreq_cpu_put() call isn't made, the policy won't be
 * freed, as freeing depends on the kobject count.
 *
 * Return: A valid policy on success, otherwise NULL on failure.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (WARN_ON(cpu >= nr_cpu_ids))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = cpufreq_cpu_get_raw(cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

/**
 * cpufreq_cpu_put: Decrements the usage count of a policy
 *
 * @policy: policy earlier returned by cpufreq_cpu_get().
 *
 * This decrements the kobject reference count incremented earlier by calling
 * cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	kobject_put(&policy->kobj);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
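
/*
 * Illustrative usage sketch (assumption, not code from this file): every
 * successful cpufreq_cpu_get() must be paired with cpufreq_cpu_put(),
 * otherwise the policy kobject reference is leaked and the policy can
 * never be freed.
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		pr_info("CPU%u current freq: %u kHz\n", cpu, policy->cur);
 *		cpufreq_cpu_put(policy);
 *	}
 */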
/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
	static unsigned long l_p_j_ref;
	static unsigned int l_p_j_ref_freq;

	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
#endif
}
static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {
	case CPUFREQ_PRECHANGE:
		/*
		 * Detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
					 freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu\n",
			 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}

/* Do post notifications when there are chances that transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
	if (!transition_failed)
		return;

	swap(freqs->old, freqs->new);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}
void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs)
{
	/*
	 * Catch double invocations of _begin() which lead to self-deadlock.
	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
	 * doesn't invoke _begin() on their behalf, and hence the chances of
	 * double invocations are very low. Moreover, there are scenarios
	 * where these checks can emit false-positive warnings in these
	 * drivers; so we avoid that by skipping them altogether.
	 */
	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
				&& current == policy->transition_task);

wait:
	wait_event(policy->transition_wait, !policy->transition_ongoing);

	spin_lock(&policy->transition_lock);

	if (unlikely(policy->transition_ongoing)) {
		spin_unlock(&policy->transition_lock);
		goto wait;
	}

	policy->transition_ongoing = true;
	policy->transition_task = current;

	spin_unlock(&policy->transition_lock);

	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);

void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	if (unlikely(WARN_ON(!policy->transition_ongoing)))
		return;

	cpufreq_notify_post_transition(policy, freqs, transition_failed);

	policy->transition_ongoing = false;
	policy->transition_task = NULL;

	wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
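
/*
 * Illustrative sketch of the expected driver-side pattern (assumption, not
 * taken from this file): drivers that notify synchronously wrap the actual
 * hardware switch in a _begin()/_end() pair, passing a non-zero
 * "transition_failed" value when the switch did not happen.
 *
 *	struct cpufreq_freqs freqs = { .old = policy->cur, .new = target };
 *	int err;
 *
 *	cpufreq_freq_transition_begin(policy, &freqs);
 *	err = my_write_freq_to_hw(target);	// hypothetical helper
 *	cpufreq_freq_transition_end(policy, &freqs, err);
 */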
/*
 * Fast frequency switching status count.  Positive means "enabled", negative
 * means "disabled" and 0 means "not decided yet".
 */
static int cpufreq_fast_switch_count;
static DEFINE_MUTEX(cpufreq_fast_switch_lock);

static void cpufreq_list_transition_notifiers(void)
{
	struct notifier_block *nb;

	pr_info("Registered transition notifiers:\n");

	mutex_lock(&cpufreq_transition_notifier_list.mutex);

	for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
		pr_info("%pF\n", nb->notifier_call);

	mutex_unlock(&cpufreq_transition_notifier_list.mutex);
}
/**
 * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
 * @policy: cpufreq policy to enable fast frequency switching for.
 *
 * Try to enable fast frequency switching for @policy.
 *
 * The attempt will fail if there is at least one transition notifier registered
 * at this point, as fast frequency switching is quite fundamentally at odds
 * with transition notifiers. Thus if successful, it will make registration of
 * transition notifiers fail going forward.
 */
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
{
	lockdep_assert_held(&policy->rwsem);

	if (!policy->fast_switch_possible)
		return;

	mutex_lock(&cpufreq_fast_switch_lock);
	if (cpufreq_fast_switch_count >= 0) {
		cpufreq_fast_switch_count++;
		policy->fast_switch_enabled = true;
	} else {
		pr_warn("CPU%u: Fast frequency switching not enabled\n",
			policy->cpu);
		cpufreq_list_transition_notifiers();
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);

/**
 * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
 * @policy: cpufreq policy to disable fast frequency switching for.
 */
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
{
	mutex_lock(&cpufreq_fast_switch_lock);
	if (policy->fast_switch_enabled) {
		policy->fast_switch_enabled = false;
		if (!WARN_ON(cpufreq_fast_switch_count <= 0))
			cpufreq_fast_switch_count--;
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	int ret, enable;

	ret = sscanf(buf, "%d", &enable);
	if (ret != 1 || enable < 0 || enable > 1)
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n",
		       __func__, enable ? "enable" : "disable");
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n",
		 __func__, enable ? "enabled" : "disabled");

	return count;
}
define_one_global_rw(boost);

static struct cpufreq_governor *find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	for_each_governor(t)
		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}
/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (cpufreq_driver->setpolicy) {
		if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strncasecmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = find_governor(str_governor);

		if (t == NULL) {
			int ret;

			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
	return err;
}
/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);

static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
	ssize_t ret;

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
	else
		ret = sprintf(buf, "%u\n", policy->cur);
	return ret;
}
static int cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)			\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	int ret, temp;							\
	struct cpufreq_policy new_policy;				\
									\
	memcpy(&new_policy, policy, sizeof(*policy));			\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	temp = new_policy.object;					\
	ret = cpufreq_set_policy(policy, &new_policy);			\
	if (!ret)							\
		policy->user_policy.object = temp;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
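
/*
 * For reference, a sketch of what the macros above expand to for one
 * attribute (expansion shown for illustration only):
 *
 *	static ssize_t show_scaling_max_freq(struct cpufreq_policy *policy,
 *					     char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->max);
 *	}
 */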
/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy);

	if (!cur_freq)
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}

/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}

/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	int ret;
	char	str_governor[16];
	struct cpufreq_policy new_policy;

	memcpy(&new_policy, policy, sizeof(*policy));

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	ret = cpufreq_set_policy(policy, &new_policy);
	return ret ? ret : count;
}
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!has_target()) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	for_each_governor(t) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}
/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;

	if (cpufreq_driver->bios_limit) {
		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
		if (!ret)
			return sprintf(buf, "%u\n", limit);
	}
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	down_read(&policy->rwsem);
	ret = fattr->show(policy, buf);
	up_read(&policy->rwsem);

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	get_online_cpus();

	if (cpu_online(policy->cpu)) {
		down_write(&policy->rwsem);
		ret = fattr->store(policy, buf, count);
		up_write(&policy->rwsem);
	}

	put_online_cpus();

	return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};
static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
{
	struct device *cpu_dev;

	pr_debug("%s: Adding symlink for CPU: %u\n", __func__, cpu);

	if (!policy)
		return 0;

	cpu_dev = get_cpu_device(cpu);
	if (WARN_ON(!cpu_dev))
		return 0;

	return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq");
}

static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
{
	struct device *cpu_dev;

	pr_debug("%s: Removing symlink for CPU: %u\n", __func__, cpu);

	cpu_dev = get_cpu_device(cpu);
	if (WARN_ON(!cpu_dev))
		return;

	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
}

/* Add/remove symlinks for all related CPUs */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	/* Some related CPUs might not be present (physically hotplugged) */
	for_each_cpu(j, policy->real_cpus) {
		ret = add_cpu_dev_symlink(policy, j);
		if (ret)
			break;
	}

	return ret;
}

static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
{
	unsigned int j;

	/* Some related CPUs might not be present (physically hotplugged) */
	for_each_cpu(j, policy->real_cpus)
		remove_cpu_dev_symlink(policy, j);
}
static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while (drv_attr && *drv_attr) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			return ret;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			return ret;
	}

	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
	if (ret)
		return ret;

	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			return ret;
	}

	return cpufreq_add_dev_symlink(policy);
}
__weak struct cpufreq_governor *cpufreq_default_governor(void)
{
	return NULL;
}

static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_governor *gov = NULL;
	struct cpufreq_policy new_policy;

	memcpy(&new_policy, policy, sizeof(*policy));

	/* Update governor of new_policy to the governor used before hotplug */
	gov = find_governor(policy->last_governor);
	if (gov) {
		pr_debug("Restoring governor %s for cpu %d\n",
				policy->governor->name, policy->cpu);
	} else {
		gov = cpufreq_default_governor();
		if (!gov)
			return -ENODATA;
	}

	new_policy.governor = gov;

	/* Use the default policy if there is no last_policy. */
	if (cpufreq_driver->setpolicy) {
		if (policy->last_policy)
			new_policy.policy = policy->last_policy;
		else
			cpufreq_parse_governor(gov->name, &new_policy.policy,
					       NULL);
	}
	/* set default policy */
	return cpufreq_set_policy(policy, &new_policy);
}
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

	/* Has this CPU been taken care of already? */
	if (cpumask_test_cpu(cpu, policy->cpus))
		return 0;

	down_write(&policy->rwsem);
	if (has_target()) {
		ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			goto unlock;
		}
	}

	cpumask_set_cpu(cpu, policy->cpus);

	if (has_target()) {
		ret = cpufreq_start_governor(policy);
		if (ret)
			pr_err("%s: Failed to start governor\n", __func__);
	}

unlock:
	up_write(&policy->rwsem);
	return ret;
}

static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);
	unsigned int cpu = policy->cpu;
	pr_debug("handle_update for cpu %u called\n", cpu);
	cpufreq_update_policy(cpu);
}
static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);
	struct cpufreq_policy *policy;
	int ret;

	if (WARN_ON(!dev))
		return NULL;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
		goto err_free_rcpumask;

	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   cpufreq_global_kobject, "policy%u", cpu);
	if (ret) {
		pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
		goto err_free_real_cpus;
	}

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);
	spin_lock_init(&policy->transition_lock);
	init_waitqueue_head(&policy->transition_wait);
	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	policy->cpu = cpu;
	return policy;

err_free_real_cpus:
	free_cpumask_var(policy->real_cpus);
err_free_rcpumask:
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}
static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
{
	struct kobject *kobj;
	struct completion *cmp;

	if (notify)
		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
					     CPUFREQ_REMOVE_POLICY, policy);

	down_write(&policy->rwsem);
	cpufreq_remove_dev_symlink(policy);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_write(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}

static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
{
	unsigned long flags;
	int cpu;

	/* Remove policy from list */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_del(&policy->policy_list);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(cpufreq_cpu_data, cpu) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_policy_put_kobj(policy, notify);
	free_cpumask_var(policy->real_cpus);
	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}
static int cpufreq_online(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	bool new_policy;
	unsigned long flags;
	unsigned int j;
	int ret;

	pr_debug("%s: bringing CPU%u online\n", __func__, cpu);

	/* Check if this CPU already has a policy to manage it */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy) {
		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
		if (!policy_is_inactive(policy))
			return cpufreq_add_policy_cpu(policy, cpu);

		/* This is the only online CPU for the policy.  Start over. */
		new_policy = false;
		down_write(&policy->rwsem);
		policy->cpu = cpu;
		policy->governor = NULL;
		up_write(&policy->rwsem);
	} else {
		new_policy = true;
		policy = cpufreq_policy_alloc(cpu);
		if (!policy)
			return -ENOMEM;
	}

	cpumask_copy(policy->cpus, cpumask_of(cpu));

	/*
	 * Call the driver. From then on the cpufreq core must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU.
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto out_free_policy;
	}

	down_write(&policy->rwsem);

	if (new_policy) {
		/* related_cpus should at least include policy->cpus. */
		cpumask_copy(policy->related_cpus, policy->cpus);
		/* Remember CPUs present at the policy creation time. */
		cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
	}

	/*
	 * affected cpus must always be the ones which are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	if (new_policy) {
		policy->user_policy.min = policy->min;
		policy->user_policy.max = policy->max;

		write_lock_irqsave(&cpufreq_driver_lock, flags);
		for_each_cpu(j, policy->related_cpus)
			per_cpu(cpufreq_cpu_data, j) = policy;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	}

	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
		policy->cur = cpufreq_driver->get(policy->cpu);
		if (!policy->cur) {
			pr_err("%s: ->get() failed\n", __func__);
			goto out_exit_policy;
		}
	}

	/*
	 * Sometimes boot loaders set the CPU frequency to a value outside of
	 * the frequency table known to the cpufreq core. In such cases the
	 * CPU might be unstable if it has to run at that frequency for a long
	 * time, so it is better to set it to a frequency which is listed in
	 * the freq-table. It also keeps cpufreq stats consistent, as
	 * cpufreq-stats would otherwise fail to register because the current
	 * frequency of the CPU isn't found in the freq-table.
	 *
	 * Because we don't want this change to affect the boot process badly,
	 * we go for the next freq which is >= policy->cur ('cur' must be set
	 * by now, otherwise we will end up setting freq to the lowest entry of
	 * the table, as 'cur' is initialized to zero).
	 *
	 * We are passing target-freq as "policy->cur - 1" otherwise
	 * __cpufreq_driver_target() would simply fail, as policy->cur will be
	 * equal to target-freq.
	 */
	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
	    && has_target()) {
		/* Are we running at unknown frequency ? */
		ret = cpufreq_frequency_table_get_index(policy, policy->cur);
		if (ret == -EINVAL) {
			/* Warn user and fix it */
			pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
				__func__, policy->cpu, policy->cur);
			ret = __cpufreq_driver_target(policy, policy->cur - 1,
				CPUFREQ_RELATION_L);

			/*
			 * Reaching here after boot in a few seconds may not
			 * mean that system will remain stable at "unknown"
			 * frequency for longer duration. Hence, a BUG_ON().
			 */
			BUG_ON(ret);
			pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
				__func__, policy->cpu, policy->cur);
		}
	}

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

	if (new_policy) {
		ret = cpufreq_add_dev_interface(policy);
		if (ret)
			goto out_exit_policy;
		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				CPUFREQ_CREATE_POLICY, policy);

		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_add(&policy->policy_list, &cpufreq_policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	}

	ret = cpufreq_init_policy(policy);
	if (ret) {
		pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
		       __func__, cpu, ret);
		/* cpufreq_policy_free() will notify based on this */
		new_policy = false;
		goto out_exit_policy;
	}

	up_write(&policy->rwsem);

	kobject_uevent(&policy->kobj, KOBJ_ADD);

	/* Callback for handling stuff after policy is ready */
	if (cpufreq_driver->ready)
		cpufreq_driver->ready(policy);

	pr_debug("initialization complete\n");

	return 0;

out_exit_policy:
	up_write(&policy->rwsem);

	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);
out_free_policy:
	cpufreq_policy_free(policy, !new_policy);
	return ret;
}
/**
 * cpufreq_add_dev - the cpufreq interface for a CPU device.
 * @dev: CPU device.
 * @sif: Subsystem interface structure pointer (not used)
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	struct cpufreq_policy *policy;
	unsigned cpu = dev->id;

	dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);

	if (cpu_online(cpu))
		return cpufreq_online(cpu);

	/*
	 * A hotplug notifier will follow and we will handle it as CPU online
	 * then.  For now, just create the sysfs link, unless there is no policy
	 * or the link is already present.
	 */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (!policy || cpumask_test_and_set_cpu(cpu, policy->real_cpus))
		return 0;

	return add_cpu_dev_symlink(policy, cpu);
}
static void cpufreq_offline(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	int ret;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return;
	}

	down_write(&policy->rwsem);
	if (has_target()) {
		ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret)
			pr_err("%s: Failed to stop governor\n", __func__);
	}

	cpumask_clear_cpu(cpu, policy->cpus);

	if (policy_is_inactive(policy)) {
		if (has_target())
			strncpy(policy->last_governor, policy->governor->name,
				CPUFREQ_NAME_LEN);
		else
			policy->last_policy = policy->policy;
	} else if (cpu == policy->cpu) {
		/* Nominate new CPU */
		policy->cpu = cpumask_any(policy->cpus);
	}

	/* Start governor again for active policy */
	if (!policy_is_inactive(policy)) {
		if (has_target()) {
			ret = cpufreq_start_governor(policy);
			if (ret)
				pr_err("%s: Failed to start governor\n", __func__);
		}

		goto unlock;
	}

	if (cpufreq_driver->stop_cpu)
		cpufreq_driver->stop_cpu(policy);

	/* If cpu is last user of policy, free policy */
	if (has_target()) {
		ret = cpufreq_exit_governor(policy);
		if (ret)
			pr_err("%s: Failed to exit governor\n", __func__);
	}

	/*
	 * Perform the ->exit() even during light-weight tear-down,
	 * since this is a core component, and is essential for the
	 * subsequent light-weight ->init() to succeed.
	 */
	if (cpufreq_driver->exit) {
		cpufreq_driver->exit(policy);
		policy->freq_table = NULL;
	}

unlock:
	up_write(&policy->rwsem);
}
/**
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */
static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	if (!policy)
		return;

	if (cpu_online(cpu))
		cpufreq_offline(cpu);

	cpumask_clear_cpu(cpu, policy->real_cpus);
	remove_cpu_dev_symlink(policy, cpu);

	if (cpumask_empty(policy->real_cpus))
		cpufreq_policy_free(policy, true);
}
/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
 * in deep trouble.
 * @policy: policy managing CPUs
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to current frequency first, and need to clean up later.
 * So either call to cpufreq_update_policy() or schedule handle_update()).
 */
static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
				unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
		 policy->cur, new_freq);

	freqs.old = policy->cur;
	freqs.new = new_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	cpufreq_freq_transition_end(policy, &freqs, 0);
}
/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned int ret_freq = 0;
	unsigned long flags;

	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
		ret_freq = cpufreq_driver->get(cpu);
		read_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return ret_freq;
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	policy = cpufreq_cpu_get(cpu);
	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);

/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->max;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(policy->cpu);

	/*
	 * Updating inactive policies is invalid, so avoid doing that.  Also
	 * if fast frequency switching is used with the given policy, the check
	 * against policy->cur is pointless, so skip it in that case too.
	 */
	if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
		return ret_freq;

	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
					saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(policy, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		down_read(&policy->rwsem);
		ret_freq = __cpufreq_get(policy);
		up_read(&policy->rwsem);

		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
static unsigned int cpufreq_update_current_freq(struct cpufreq_policy *policy)
{
	unsigned int new_freq;

	if (cpufreq_suspended)
		return 0;

	new_freq = cpufreq_driver->get(policy->cpu);
	if (!new_freq)
		return 0;

	if (!policy->cur) {
		pr_debug("cpufreq: Driver did not initialize current freq\n");
		policy->cur = new_freq;
	} else if (policy->cur != new_freq && has_target()) {
		cpufreq_out_of_sync(policy, new_freq);
	}

	return new_freq;
}

static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
/*
 * In case the platform wants some specific frequency to be configured
 * during suspend.
 */
int cpufreq_generic_suspend(struct cpufreq_policy *policy)
{
	int ret;

	if (!policy->suspend_freq) {
		pr_debug("%s: suspend_freq not defined\n", __func__);
		return 0;
	}

	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
			policy->suspend_freq);

	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
			CPUFREQ_RELATION_H);
	if (ret)
		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
				__func__, policy->suspend_freq, ret);

	return ret;
}
EXPORT_SYMBOL(cpufreq_generic_suspend);

/**
 * cpufreq_suspend() - Suspend CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycles for suspending governors,
 * as some platforms can't change frequency after this point in the suspend
 * cycle.  This is because some of the devices (like i2c, regulators, etc.)
 * used for changing the frequency are suspended quickly after this point.
 */
void cpufreq_suspend(void)
{
	struct cpufreq_policy *policy;
	int ret;

	if (!cpufreq_driver)
		return;

	if (!has_target() && !cpufreq_driver->suspend)
		goto suspend;

	pr_debug("%s: Suspending Governors\n", __func__);

	for_each_active_policy(policy) {
		if (has_target()) {
			down_write(&policy->rwsem);
			ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
			up_write(&policy->rwsem);

			if (ret)
				pr_err("%s: Failed to stop governor for policy: %p\n",
					__func__, policy);
		}

		if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
			pr_err("%s: Failed to suspend driver: %p\n", __func__,
				policy);
	}

suspend:
	cpufreq_suspended = true;
}
/**
 * cpufreq_resume() - Resume CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycle for resuming governors that
 * are suspended with cpufreq_suspend().
 */
void cpufreq_resume(void)
{
	struct cpufreq_policy *policy;
	int ret;

	if (!cpufreq_driver)
		return;

	cpufreq_suspended = false;

	if (!has_target() && !cpufreq_driver->resume)
		return;

	pr_debug("%s: Resuming Governors\n", __func__);

	for_each_active_policy(policy) {
		if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
			pr_err("%s: Failed to resume driver: %p\n", __func__,
				policy);
		} else if (has_target()) {
			down_write(&policy->rwsem);
			ret = cpufreq_start_governor(policy);
			up_write(&policy->rwsem);

			if (ret)
				pr_err("%s: Failed to start governor for policy: %p\n",
				       __func__, policy);
		}
	}
}

/**
 * cpufreq_get_current_driver - return current driver's name
 *
 * Return the name string of the currently loaded cpufreq driver
 * or NULL, if none.
 */
const char *cpufreq_get_current_driver(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->name;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);

/**
 * cpufreq_get_driver_data - return current driver data
 *
 * Return the private data of the currently loaded cpufreq
 * driver, or NULL if no cpufreq driver is loaded.
 */
void *cpufreq_get_driver_data(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->driver_data;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	WARN_ON(!init_cpufreq_transition_notifier_list_called);

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		if (cpufreq_fast_switch_count > 0) {
			mutex_unlock(&cpufreq_fast_switch_lock);
			return -EBUSY;
		}
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		if (!ret)
			cpufreq_fast_switch_count--;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);

/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
			cpufreq_fast_switch_count++;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);
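
/*
 * Illustrative sketch of a transition notifier user (assumption, not code
 * from this file): the callback receives a struct cpufreq_freqs pointer and
 * is called once with CPUFREQ_PRECHANGE and once with CPUFREQ_POSTCHANGE.
 *
 *	static int my_transition_cb(struct notifier_block *nb,
 *				    unsigned long val, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (val == CPUFREQ_POSTCHANGE)
 *			pr_debug("CPU%u now at %u kHz\n", freqs->cpu, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_transition_cb };
 *
 *	cpufreq_register_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */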
/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/

/**
 * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
 * @policy: cpufreq policy to switch the frequency for.
 * @target_freq: New frequency to set (may be approximate).
 *
 * Carry out a fast frequency switch without sleeping.
 *
 * The driver's ->fast_switch() callback invoked by this function must be
 * suitable for being called from within RCU-sched read-side critical sections
 * and it is expected to select the minimum available frequency greater than or
 * equal to @target_freq (CPUFREQ_RELATION_L).
 *
 * This function must not be called if policy->fast_switch_enabled is unset.
 *
 * Governors calling this function must guarantee that it will never be invoked
 * twice in parallel for the same policy and that it will never be called in
 * parallel with either ->target() or ->target_index() for the same policy.
 *
 * If CPUFREQ_ENTRY_INVALID is returned by the driver's ->fast_switch()
 * callback to indicate an error condition, the hardware configuration must be
 * preserved.
 */
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
					unsigned int target_freq)
{
	target_freq = clamp_val(target_freq, policy->min, policy->max);

	return cpufreq_driver->fast_switch(policy, target_freq);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
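
/*
 * Illustrative governor-side sketch (assumption, not code from this file):
 * a fast-switch capable governor calls this from its update hook only when
 * policy->fast_switch_enabled was set via cpufreq_enable_fast_switch().
 *
 *	if (policy->fast_switch_enabled) {
 *		unsigned int freq;
 *
 *		freq = cpufreq_driver_fast_switch(policy, next_freq);
 *		if (freq == CPUFREQ_ENTRY_INVALID)
 *			return;		// hardware state left unchanged
 *	}
 */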
/* Must set freqs->new to intermediate frequency */
static int __target_intermediate(struct cpufreq_policy *policy,
				 struct cpufreq_freqs *freqs, int index)
{
	int ret;

	freqs->new = cpufreq_driver->get_intermediate(policy, index);

	/* We don't need to switch to intermediate freq */
	if (!freqs->new)
		return 0;

	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
		 __func__, policy->cpu, freqs->old, freqs->new);

	cpufreq_freq_transition_begin(policy, freqs);
	ret = cpufreq_driver->target_intermediate(policy, index);
	cpufreq_freq_transition_end(policy, freqs, ret);

	if (ret)
		pr_err("%s: Failed to change to intermediate frequency: %d\n",
		       __func__, ret);

	return ret;
}

static int __target_index(struct cpufreq_policy *policy,
			  struct cpufreq_frequency_table *freq_table, int index)
{
	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
	unsigned int intermediate_freq = 0;
	int retval = -EINVAL;
	bool notify;

	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
	if (notify) {
		/* Handle switching to intermediate frequency */
		if (cpufreq_driver->get_intermediate) {
			retval = __target_intermediate(policy, &freqs, index);
			if (retval)
				return retval;

			intermediate_freq = freqs.new;
			/* Set old freq to intermediate */
			if (intermediate_freq)
				freqs.old = freqs.new;
		}

		freqs.new = freq_table[index].frequency;
		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
			 __func__, policy->cpu, freqs.old, freqs.new);

		cpufreq_freq_transition_begin(policy, &freqs);
	}

	retval = cpufreq_driver->target_index(policy, index);
	if (retval)
		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
		       retval);

	if (notify) {
		cpufreq_freq_transition_end(policy, &freqs, retval);

		/*
		 * Failed after setting to intermediate freq? Driver should have
		 * reverted back to initial frequency and so should we. Check
		 * here for intermediate_freq instead of get_intermediate, in
		 * case we haven't switched to intermediate freq at all.
		 */
		if (unlikely(retval && intermediate_freq)) {
			freqs.old = intermediate_freq;
			freqs.new = policy->restore_freq;
			cpufreq_freq_transition_begin(policy, &freqs);
			cpufreq_freq_transition_end(policy, &freqs, 0);
		}
	}

	return retval;
}
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	unsigned int old_target_freq = target_freq;
	struct cpufreq_frequency_table *freq_table;
	int index, retval;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	if (target_freq > policy->max)
		target_freq = policy->max;
	if (target_freq < policy->min)
		target_freq = policy->min;

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
		 policy->cpu, target_freq, relation, old_target_freq);

	/*
	 * This might look like a redundant call as we are checking it again
	 * after finding index. But it is left intentionally for cases where
	 * exactly same freq is called again and so we can save on few function
	 * calls.
	 */
	if (target_freq == policy->cur)
		return 0;

	/* Save last value to restore later on errors */
	policy->restore_freq = policy->cur;

	if (cpufreq_driver->target)
		return cpufreq_driver->target(policy, target_freq, relation);

	if (!cpufreq_driver->target_index)
		return -EINVAL;

	freq_table = cpufreq_frequency_get_table(policy->cpu);
	if (unlikely(!freq_table)) {
		pr_err("%s: Unable to find freq_table\n", __func__);
		return -EINVAL;
	}

	retval = cpufreq_frequency_table_target(policy, freq_table, target_freq,
						relation, &index);
	if (unlikely(retval)) {
		pr_err("%s: Unable to find matching freq\n", __func__);
		return retval;
	}

	if (freq_table[index].frequency == policy->cur)
		return 0;

	return __target_index(policy, freq_table, index);
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);

int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret = -EINVAL;

	down_write(&policy->rwsem);

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	up_write(&policy->rwsem);

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
__weak struct cpufreq_governor *cpufreq_fallback_governor(void)
{
	return NULL;
}

static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
{
	int ret;

	/* Don't start any governor operations if we are entering suspend */
	if (cpufreq_suspended)
		return 0;
	/*
	 * Governor might not be initiated here if ACPI _PPC changed
	 * notification happened, so check it.
	 */
	if (!policy->governor)
		return -EINVAL;

	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		struct cpufreq_governor *gov = cpufreq_fallback_governor();

		if (gov) {
			pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
				policy->governor->name, gov->name);
			policy->governor = gov;
		} else {
			return -EINVAL;
		}
	}

	if (event == CPUFREQ_GOV_POLICY_INIT)
		if (!try_module_get(policy->governor->owner))
			return -EINVAL;

	pr_debug("%s: for CPU %u, event %u\n", __func__, policy->cpu, event);

	ret = policy->governor->governor(policy, event);

	if (!ret) {
		if (event == CPUFREQ_GOV_POLICY_INIT)
			policy->governor->initialized++;
		else if (event == CPUFREQ_GOV_POLICY_EXIT)
			policy->governor->initialized--;
	}

	if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
			((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
		module_put(policy->governor->owner);

	return ret;
}

static int cpufreq_start_governor(struct cpufreq_policy *policy)
{
	int ret;

	if (cpufreq_driver->get && !cpufreq_driver->setpolicy)
		cpufreq_update_current_freq(policy);

	ret = cpufreq_governor(policy, CPUFREQ_GOV_START);
	return ret ? ret : cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
}
int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	mutex_lock(&cpufreq_governor_mutex);

	governor->initialized = 0;
	err = -EBUSY;
	if (!find_governor(governor->name)) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);

void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

	/* clear last_governor for all inactive policies */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_inactive_policy(policy) {
		if (!strcmp(policy->last_governor, governor->name)) {
			policy->governor = NULL;
			strcpy(policy->last_governor, "\0");
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
	return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 * @cpu: CPU to find the policy for
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;
	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(*policy));

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
/*
 * policy : current policy.
 * new_policy: policy to be set.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy)
{
	struct cpufreq_governor *old_gov;
	int ret;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
		 new_policy->cpu, new_policy->min, new_policy->max);

	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

	/*
	 * This check works well when we store new min/max freq attributes,
	 * because new_policy is a copy of policy with one field updated.
	 */
	if (new_policy->min > new_policy->max)
		return -EINVAL;

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, new_policy);

	/*
	 * verify the cpu speed can be set within this limit, which might be
	 * different to the first one
	 */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, new_policy);

	policy->min = new_policy->min;
	policy->max = new_policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
		 policy->min, policy->max);

	if (cpufreq_driver->setpolicy) {
		policy->policy = new_policy->policy;
		pr_debug("setting range\n");
		return cpufreq_driver->setpolicy(new_policy);
	}

	if (new_policy->governor == policy->governor) {
		pr_debug("cpufreq: governor limits update\n");
		return cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
	}

	pr_debug("governor switch\n");

	/* save old, working values */
	old_gov = policy->governor;
	/* end old governor */
	if (old_gov) {
		ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			/* This can happen due to race with other operations */
			pr_debug("%s: Failed to Stop Governor: %s (%d)\n",
				 __func__, old_gov->name, ret);
			return ret;
		}

		ret = cpufreq_exit_governor(policy);
		if (ret) {
			pr_err("%s: Failed to Exit Governor: %s (%d)\n",
			       __func__, old_gov->name, ret);
			return ret;
		}
	}

	/* start new governor */
	policy->governor = new_policy->governor;
	ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
	if (!ret) {
		ret = cpufreq_start_governor(policy);
		if (!ret) {
			pr_debug("cpufreq: governor change\n");
			return 0;
		}
		cpufreq_exit_governor(policy);
	}

	/* new governor failed, so re-start old one */
	pr_debug("starting governor %s failed\n", policy->governor->name);
	if (old_gov) {
		policy->governor = old_gov;
		if (cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
			policy->governor = NULL;
		else
			cpufreq_start_governor(policy);
	}

	return ret;
}
/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cpufreq_policy new_policy;
	int ret;

	if (!policy)
		return -ENODEV;

	down_write(&policy->rwsem);

	pr_debug("updating policy for CPU %u\n", cpu);
	memcpy(&new_policy, policy, sizeof(*policy));
	new_policy.min = policy->user_policy.min;
	new_policy.max = policy->user_policy.max;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
		new_policy.cur = cpufreq_update_current_freq(policy);
		if (WARN_ON(!new_policy.cur)) {
			ret = -EIO;
			goto unlock;
		}
	}

	ret = cpufreq_set_policy(policy, &new_policy);

unlock:
	up_write(&policy->rwsem);

	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
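
/*
 * Illustrative sketch (not from this file): platform code that learns of a
 * firmware-imposed limit change (for example an ACPI _PPC notification) can
 * ask the core to re-evaluate every online CPU.  "example_limits_changed"
 * is a hypothetical handler name.
 *
 *	#include <linux/cpufreq.h>
 *	#include <linux/cpumask.h>
 *
 *	static void example_limits_changed(void)
 *	{
 *		unsigned int cpu;
 *
 *		for_each_online_cpu(cpu)
 *			cpufreq_update_policy(cpu);
 *	}
 */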
static int cpufreq_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		cpufreq_online(cpu);
		break;

	case CPU_DOWN_PREPARE:
		cpufreq_offline(cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
/*********************************************************************
 *                              BOOST                                *
 *********************************************************************/
static int cpufreq_boost_set_sw(int state)
{
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_policy *policy;
	int ret = -EINVAL;

	for_each_active_policy(policy) {
		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (freq_table) {
			ret = cpufreq_frequency_table_cpuinfo(policy,
							      freq_table);
			if (ret) {
				pr_err("%s: Policy frequency update failed\n",
				       __func__);
				break;
			}

			down_write(&policy->rwsem);
			policy->user_policy.max = policy->max;
			cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
			up_write(&policy->rwsem);
		}
	}

	return ret;
}
int cpufreq_boost_trigger_state(int state)
{
	unsigned long flags;
	int ret = 0;

	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_driver->set_boost(state);
	if (ret) {
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		cpufreq_driver->boost_enabled = !state;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		pr_err("%s: Cannot %s BOOST\n",
		       __func__, state ? "enable" : "disable");
	}

	return ret;
}
static bool cpufreq_boost_supported(void)
{
	return likely(cpufreq_driver) && cpufreq_driver->set_boost;
}
static int create_boost_sysfs_file(void)
{
	int ret;

	ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
	if (ret)
		pr_err("%s: cannot register global BOOST sysfs file\n",
		       __func__);

	return ret;
}
static void remove_boost_sysfs_file(void)
{
	if (cpufreq_boost_supported())
		sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
}
int cpufreq_enable_boost_support(void)
{
	if (!cpufreq_driver)
		return -EINVAL;

	if (cpufreq_boost_supported())
		return 0;

	cpufreq_driver->set_boost = cpufreq_boost_set_sw;

	/* This will get removed on driver unregister */
	return create_boost_sysfs_file();
}
EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
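
/*
 * Illustrative sketch (not from this file): a driver whose frequency table
 * contains boost entries but which has no hardware boost control can opt in
 * to the software boost path from its ->init() callback.  The names
 * "example_cpufreq_init" and "example_has_boost_freqs" are hypothetical.
 *
 *	static int example_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		int ret;
 *
 *		// ... set up the policy and frequency table first ...
 *
 *		if (example_has_boost_freqs) {
 *			ret = cpufreq_enable_boost_support();
 *			if (ret)
 *				return ret;
 *		}
 *
 *		return 0;
 *	}
 */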
int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EEXIST when another driver got here first
 * (and isn't unregistered in the meantime).
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
	      driver_data->target) ||
	    (driver_data->setpolicy && (driver_data->target_index ||
	      driver_data->target)) ||
	    (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	/* Protect against concurrent CPU online/offline. */
	get_online_cpus();

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		ret = -EEXIST;
		goto out;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	if (cpufreq_boost_supported()) {
		ret = create_boost_sysfs_file();
		if (ret)
			goto err_null_driver;
	}

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
	    list_empty(&cpufreq_policy_list)) {
		/* if all ->init() calls failed, unregister */
		ret = -ENODEV;
		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
			 driver_data->name);
		goto err_if_unreg;
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);
	goto out;

err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	remove_boost_sysfs_file();
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
out:
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
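
/*
 * Illustrative sketch (not from this file): the skeleton of a platform
 * driver module registering with this core.  All "example_*" names are
 * hypothetical; per the validation above, ->init and ->verify are required
 * together with exactly one of ->setpolicy or ->target_index / ->target.
 * cpufreq_verify_within_cpu_limits() is assumed from <linux/cpufreq.h>.
 *
 *	#include <linux/cpufreq.h>
 *	#include <linux/module.h>
 *
 *	static int example_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		// Fill in policy->cpuinfo, min/max/cur, frequency table, etc.
 *		return 0;
 *	}
 *
 *	static int example_cpufreq_verify(struct cpufreq_policy *policy)
 *	{
 *		cpufreq_verify_within_cpu_limits(policy);
 *		return 0;
 *	}
 *
 *	static int example_cpufreq_target_index(struct cpufreq_policy *policy,
 *						unsigned int index)
 *	{
 *		// Program the hardware for the chosen table index.
 *		return 0;
 *	}
 *
 *	static struct cpufreq_driver example_cpufreq_driver = {
 *		.name		= "example",
 *		.init		= example_cpufreq_init,
 *		.verify		= example_cpufreq_verify,
 *		.target_index	= example_cpufreq_target_index,
 *	};
 *
 *	static int __init example_cpufreq_module_init(void)
 *	{
 *		return cpufreq_register_driver(&example_cpufreq_driver);
 *	}
 *	module_init(example_cpufreq_module_init);
 */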
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	/* Protect against concurrent cpu hotplug */
	get_online_cpus();
	subsys_interface_unregister(&cpufreq_interface);
	remove_boost_sysfs_file();
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	put_online_cpus();

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
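
/*
 * Illustrative sketch (not from this file): the unload path of the
 * hypothetical "example_cpufreq_driver" module sketched above hands back
 * the same struct it registered.
 *
 *	static void __exit example_cpufreq_module_exit(void)
 *	{
 *		cpufreq_unregister_driver(&example_cpufreq_driver);
 *	}
 *	module_exit(example_cpufreq_module_exit);
 *	MODULE_LICENSE("GPL");
 */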
/*
 * Stop cpufreq at shutdown to make sure it isn't holding any locks
 * or mutexes when secondary CPUs are halted.
 */
static struct syscore_ops cpufreq_syscore_ops = {
	.shutdown = cpufreq_suspend,
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);
static int __init cpufreq_core_init(void)
{
	if (cpufreq_disabled())
		return -ENODEV;

	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
	BUG_ON(!cpufreq_global_kobject);

	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);