/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *             Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *             Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>
static LIST_HEAD(cpufreq_policy_list);

static inline bool policy_is_inactive(struct cpufreq_policy *policy)
{
        return cpumask_empty(policy->cpus);
}

static bool suitable_policy(struct cpufreq_policy *policy, bool active)
{
        return active == !policy_is_inactive(policy);
}
/* Finds next active/inactive policy */
static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
                                          bool active)
{
        do {
                /* No more policies in the list */
                if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
                        return NULL;

                policy = list_next_entry(policy, policy_list);
        } while (!suitable_policy(policy, active));

        return policy;
}
static struct cpufreq_policy *first_policy(bool active)
{
        struct cpufreq_policy *policy;

        /* No policies in the list */
        if (list_empty(&cpufreq_policy_list))
                return NULL;

        policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
                                  policy_list);

        if (!suitable_policy(policy, active))
                policy = next_policy(policy, active);

        return policy;
}
/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)                   \
        for (__policy = first_policy(__active);                        \
             __policy;                                                 \
             __policy = next_policy(__policy, __active))

#define for_each_active_policy(__policy)                               \
        for_each_suitable_policy(__policy, true)
#define for_each_inactive_policy(__policy)                             \
        for_each_suitable_policy(__policy, false)

#define for_each_policy(__policy)                                      \
        list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)                                  \
        list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
/*
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);

static DEFINE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
/**
 * cpufreq_set_update_util_data - Populate the CPU's update_util_data pointer.
 * @cpu: The CPU to set the pointer for.
 * @data: New pointer value.
 *
 * Set and publish the update_util_data pointer for the given CPU. That pointer
 * points to a struct update_util_data object containing a callback function
 * to call from cpufreq_update_util(). That function will be called from an RCU
 * read-side critical section, so it must not sleep.
 *
 * Callers must use RCU callbacks to free any memory that might be accessed
 * via the old update_util_data pointer or invoke synchronize_rcu() right after
 * this function to avoid use-after-free.
 */
void cpufreq_set_update_util_data(int cpu, struct update_util_data *data)
{
        rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), data);
}
EXPORT_SYMBOL_GPL(cpufreq_set_update_util_data);
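/*
 * A minimal usage sketch (not part of this file): a governor-like client
 * embeds a struct update_util_data, points ->func at its callback and
 * publishes it with cpufreq_set_update_util_data().  The names my_gov_data,
 * my_gov_update(), my_gov_start() and my_gov_stop() below are hypothetical.
 *
 *      static void my_gov_update(struct update_util_data *data, u64 time,
 *                                unsigned long util, unsigned long max)
 *      {
 *              // runs in an RCU read-side critical section - must not sleep
 *      }
 *
 *      static DEFINE_PER_CPU(struct update_util_data, my_gov_data);
 *
 *      static void my_gov_start(int cpu)
 *      {
 *              per_cpu(my_gov_data, cpu).func = my_gov_update;
 *              cpufreq_set_update_util_data(cpu, &per_cpu(my_gov_data, cpu));
 *      }
 *
 *      static void my_gov_stop(int cpu)
 *      {
 *              cpufreq_set_update_util_data(cpu, NULL);
 *              synchronize_rcu();      // before freeing/reusing my_gov_data
 *      }
 */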
/**
 * cpufreq_update_util - Take a note about CPU utilization changes.
 * @time: Current time.
 * @util: Current utilization.
 * @max: Utilization ceiling.
 *
 * This function is called by the scheduler on every invocation of
 * update_load_avg() on the CPU whose utilization is being updated.
 */
void cpufreq_update_util(u64 time, unsigned long util, unsigned long max)
{
        struct update_util_data *data;

        data = rcu_dereference(*this_cpu_ptr(&cpufreq_update_util_data));
        if (data && data->func)
                data->func(data, time, util, max);
}

DEFINE_MUTEX(cpufreq_governor_lock);
/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

static inline bool has_target(void)
{
        return cpufreq_driver->target_index || cpufreq_driver->target;
}

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
                              unsigned int event);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static void handle_update(struct work_struct *work);
/*
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
        srcu_init_notifier_head(&cpufreq_transition_notifier_list);
        init_cpufreq_transition_notifier_list_called = true;
        return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
        return off;
}

void disable_cpufreq(void)
{
        off = 1;
}

static DEFINE_MUTEX(cpufreq_governor_mutex);
bool have_governor_per_policy(void)
{
        return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
        if (have_governor_per_policy())
                return &policy->kobj;
        else
                return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
{
        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

        return policy && !policy_is_inactive(policy) ?
                policy->freq_table : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
        u64 idle_time;
        u64 cur_wall_time;
        u64 busy_time;

        cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

        busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

        idle_time = cur_wall_time - busy_time;
        if (wall)
                *wall = cputime_to_usecs(cur_wall_time);

        return cputime_to_usecs(idle_time);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
        u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

        if (idle_time == -1ULL)
                return get_cpu_idle_time_jiffy(cpu, wall);
        else if (!io_busy)
                idle_time += get_cpu_iowait_time_us(cpu, wall);

        return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
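/*
 * Illustrative only: how a sampling governor might turn two readings from
 * get_cpu_idle_time() into a load percentage.  prev_idle and prev_wall are
 * hypothetical state kept by the caller between samples.
 *
 *      u64 wall, idle = get_cpu_idle_time(cpu, &wall, 0);
 *      unsigned int wall_delta = wall - prev_wall;
 *      unsigned int idle_delta = idle - prev_idle;
 *      unsigned int load = wall_delta ?
 *              100 * (wall_delta - idle_delta) / wall_delta : 0;
 *      prev_wall = wall;
 *      prev_idle = idle;
 */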
/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - validate & show the freq table passed
 * - set the policy's transition latency
 * - fill policy->cpus with all possible CPUs
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
                struct cpufreq_frequency_table *table,
                unsigned int transition_latency)
{
        int ret;

        ret = cpufreq_table_validate_and_show(policy, table);
        if (ret) {
                pr_err("%s: invalid frequency table: %d\n", __func__, ret);
                return ret;
        }

        policy->cpuinfo.transition_latency = transition_latency;

        /*
         * The driver only supports the SMP configuration where all processors
         * share the clock and voltage.
         */
        cpumask_setall(policy->cpus);

        return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
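/*
 * Sketch of a driver's ->init() callback built on cpufreq_generic_init();
 * my_cpufreq_init(), my_freq_table and MY_TRANSITION_LATENCY_NS are
 * hypothetical names provided by the driver, not part of this file.
 *
 *      static int my_cpufreq_init(struct cpufreq_policy *policy)
 *      {
 *              policy->clk = clk_get(get_cpu_device(policy->cpu), NULL);
 *              if (IS_ERR(policy->clk))
 *                      return PTR_ERR(policy->clk);
 *
 *              return cpufreq_generic_init(policy, my_freq_table,
 *                                          MY_TRANSITION_LATENCY_NS);
 *      }
 */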
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

        return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);

unsigned int cpufreq_generic_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

        if (!policy || IS_ERR(policy->clk)) {
                pr_err("%s: No %s associated to cpu: %d\n",
                       __func__, policy ? "clk" : "policy", cpu);
                return 0;
        }

        return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);
/**
 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
 * @cpu: cpu to find policy for.
 *
 * This returns the policy for 'cpu', or NULL if it doesn't exist.
 * It also increments the kobject reference count to mark the policy busy, so a
 * corresponding call to cpufreq_cpu_put() is required to decrement it again.
 * Without that cpufreq_cpu_put() call the policy is never freed, since freeing
 * depends on the kobject reference count.
 *
 * Return: A valid policy on success, otherwise NULL on failure.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = NULL;
        unsigned long flags;

        if (WARN_ON(cpu >= nr_cpu_ids))
                return NULL;

        /* get the cpufreq driver */
        read_lock_irqsave(&cpufreq_driver_lock, flags);

        if (cpufreq_driver) {
                /* get the CPU */
                policy = cpufreq_cpu_get_raw(cpu);
                if (policy)
                        kobject_get(&policy->kobj);
        }

        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

/**
 * cpufreq_cpu_put: Decrements the usage count of a policy
 * @policy: policy earlier returned by cpufreq_cpu_get().
 *
 * This decrements the kobject reference count incremented earlier by calling
 * cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
        kobject_put(&policy->kobj);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
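/*
 * Typical get/put pattern for code outside the cpufreq core (sketch only;
 * use_policy() is a hypothetical consumer):
 *
 *      struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *      if (policy) {
 *              use_policy(policy);
 *              cpufreq_cpu_put(policy);        // drop the reference we took
 *      }
 */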
/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
        static unsigned long l_p_j_ref;
        static unsigned int l_p_j_ref_freq;

        if (ci->flags & CPUFREQ_CONST_LOOPS)
                return;

        if (!l_p_j_ref_freq) {
                l_p_j_ref = loops_per_jiffy;
                l_p_j_ref_freq = ci->old;
                pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
                         l_p_j_ref, l_p_j_ref_freq);
        }
        if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
                loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
                                                ci->new);
                pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
                         loops_per_jiffy, ci->new);
        }
#endif
}
static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, unsigned int state)
{
        BUG_ON(irqs_disabled());

        if (cpufreq_disabled())
                return;

        freqs->flags = cpufreq_driver->flags;
        pr_debug("notification %u of frequency transition to %u kHz\n",
                 state, freqs->new);

        switch (state) {
        case CPUFREQ_PRECHANGE:
                /*
                 * Detect if the driver reported a value as "old frequency"
                 * which is not equal to what the cpufreq core thinks is
                 * "old frequency".
                 */
                if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
                        if ((policy) && (policy->cpu == freqs->cpu) &&
                            (policy->cur) && (policy->cur != freqs->old)) {
                                pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
                                         freqs->old, policy->cur);
                                freqs->old = policy->cur;
                        }
                }
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                         CPUFREQ_PRECHANGE, freqs);
                adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
                break;

        case CPUFREQ_POSTCHANGE:
                adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
                pr_debug("FREQ: %lu - CPU: %lu\n",
                         (unsigned long)freqs->new, (unsigned long)freqs->cpu);
                trace_cpu_frequency(freqs->new, freqs->cpu);
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                         CPUFREQ_POSTCHANGE, freqs);
                if (likely(policy) && likely(policy->cpu == freqs->cpu))
                        policy->cur = freqs->new;
                break;
        }
}
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, unsigned int state)
{
        for_each_cpu(freqs->cpu, policy->cpus)
                __cpufreq_notify_transition(policy, freqs, state);
}

/* Do post notifications when there are chances that transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, int transition_failed)
{
        cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
        if (!transition_failed)
                return;

        swap(freqs->old, freqs->new);
        cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
        cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}
void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs)
{
        /*
         * Catch double invocations of _begin() which lead to self-deadlock.
         * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
         * doesn't invoke _begin() on their behalf, and hence the chances of
         * double invocations are very low. Moreover, there are scenarios
         * where these checks can emit false-positive warnings in these
         * drivers; so we avoid that by skipping them altogether.
         */
        WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
                && current == policy->transition_task);

wait:
        wait_event(policy->transition_wait, !policy->transition_ongoing);

        spin_lock(&policy->transition_lock);

        if (unlikely(policy->transition_ongoing)) {
                spin_unlock(&policy->transition_lock);
                goto wait;
        }

        policy->transition_ongoing = true;
        policy->transition_task = current;

        spin_unlock(&policy->transition_lock);

        cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);

void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, int transition_failed)
{
        if (unlikely(WARN_ON(!policy->transition_ongoing)))
                return;

        cpufreq_notify_post_transition(policy, freqs, transition_failed);

        policy->transition_ongoing = false;
        policy->transition_task = NULL;

        wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
                          struct attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
                           const char *buf, size_t count)
{
        int ret, enable;

        ret = sscanf(buf, "%d", &enable);
        if (ret != 1 || enable < 0 || enable > 1)
                return -EINVAL;

        if (cpufreq_boost_trigger_state(enable)) {
                pr_err("%s: Cannot %s BOOST!\n",
                       __func__, enable ? "enable" : "disable");
                return -EINVAL;
        }

        pr_debug("%s: cpufreq BOOST %s\n",
                 __func__, enable ? "enabled" : "disabled");

        return count;
}
define_one_global_rw(boost);
static struct cpufreq_governor *find_governor(const char *str_governor)
{
        struct cpufreq_governor *t;

        for_each_governor(t)
                if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
                        return t;

        return NULL;
}
/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
                                  struct cpufreq_governor **governor)
{
        int err = -EINVAL;

        if (cpufreq_driver->setpolicy) {
                if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
                        *policy = CPUFREQ_POLICY_PERFORMANCE;
                        err = 0;
                } else if (!strncasecmp(str_governor, "powersave",
                                        CPUFREQ_NAME_LEN)) {
                        *policy = CPUFREQ_POLICY_POWERSAVE;
                        err = 0;
                }
        } else {
                struct cpufreq_governor *t;

                mutex_lock(&cpufreq_governor_mutex);

                t = find_governor(str_governor);

                if (t == NULL) {
                        int ret;

                        mutex_unlock(&cpufreq_governor_mutex);
                        ret = request_module("cpufreq_%s", str_governor);
                        mutex_lock(&cpufreq_governor_mutex);

                        if (ret == 0)
                                t = find_governor(str_governor);
                }

                if (t != NULL) {
                        *governor = t;
                        err = 0;
                }

                mutex_unlock(&cpufreq_governor_mutex);
        }
        return err;
}
/*
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)                     \
static ssize_t show_##file_name                         \
(struct cpufreq_policy *policy, char *buf)              \
{                                                       \
        return sprintf(buf, "%u\n", policy->object);    \
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
        ssize_t ret;

        if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
                ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
        else
                ret = sprintf(buf, "%u\n", policy->cur);
        return ret;
}

static int cpufreq_set_policy(struct cpufreq_policy *policy,
                              struct cpufreq_policy *new_policy);

/*
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)                                    \
static ssize_t store_##file_name                                        \
(struct cpufreq_policy *policy, const char *buf, size_t count)          \
{                                                                       \
        int ret, temp;                                                  \
        struct cpufreq_policy new_policy;                               \
                                                                        \
        memcpy(&new_policy, policy, sizeof(*policy));                   \
                                                                        \
        ret = sscanf(buf, "%u", &new_policy.object);                    \
        if (ret != 1)                                                   \
                return -EINVAL;                                         \
                                                                        \
        temp = new_policy.object;                                       \
        ret = cpufreq_set_policy(policy, &new_policy);                  \
        if (!ret)                                                       \
                policy->user_policy.object = temp;                      \
                                                                        \
        return ret ? ret : count;                                       \
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
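/*
 * For reference (not compiled here): show_one(scaling_max_freq, max) and
 * store_one(scaling_max_freq, max) expand to the handlers behind
 * /sys/devices/system/cpu/cpuN/cpufreq/scaling_max_freq, roughly:
 *
 *      static ssize_t show_scaling_max_freq(struct cpufreq_policy *policy,
 *                                           char *buf)
 *      {
 *              return sprintf(buf, "%u\n", policy->max);
 *      }
 *
 * so a write of "2000000" to that file ends up in cpufreq_set_policy() with
 * new_policy.max == 2000000.
 */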
/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
                                     char *buf)
{
        unsigned int cur_freq = __cpufreq_get(policy);

        if (!cur_freq)
                return sprintf(buf, "<unknown>");
        return sprintf(buf, "%u\n", cur_freq);
}
/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
        if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
                return sprintf(buf, "powersave\n");
        else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
                return sprintf(buf, "performance\n");
        else if (policy->governor)
                return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
                                 policy->governor->name);
        return -EINVAL;
}
/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
                                      const char *buf, size_t count)
{
        int ret;
        char str_governor[16];
        struct cpufreq_policy new_policy;

        memcpy(&new_policy, policy, sizeof(*policy));

        ret = sscanf(buf, "%15s", str_governor);
        if (ret != 1)
                return -EINVAL;

        if (cpufreq_parse_governor(str_governor, &new_policy.policy,
                                   &new_policy.governor))
                return -EINVAL;

        ret = cpufreq_set_policy(policy, &new_policy);
        return ret ? ret : count;
}
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
        return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}
/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
                                                char *buf)
{
        ssize_t i = 0;
        struct cpufreq_governor *t;

        if (!has_target()) {
                i += sprintf(buf, "performance powersave");
                goto out;
        }

        for_each_governor(t) {
                if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
                    - (CPUFREQ_NAME_LEN + 2)))
                        goto out;
                i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
        }
out:
        i += sprintf(&buf[i], "\n");
        return i;
}
ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
        ssize_t i = 0;
        unsigned int cpu;

        for_each_cpu(cpu, mask) {
                if (i)
                        i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
                i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
                if (i >= (PAGE_SIZE - 5))
                        break;
        }
        i += sprintf(&buf[i], "\n");
        return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
        return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
        return cpufreq_show_cpus(policy->cpus, buf);
}
static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
                                      const char *buf, size_t count)
{
        unsigned int freq = 0;
        unsigned int ret;

        if (!policy->governor || !policy->governor->store_setspeed)
                return -EINVAL;

        ret = sscanf(buf, "%u", &freq);
        if (ret != 1)
                return -EINVAL;

        policy->governor->store_setspeed(policy, freq);

        return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
        if (!policy->governor || !policy->governor->show_setspeed)
                return sprintf(buf, "<unsupported>\n");

        return policy->governor->show_setspeed(policy, buf);
}
/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
        unsigned int limit;
        int ret;

        if (cpufreq_driver->bios_limit) {
                ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
                if (!ret)
                        return sprintf(buf, "%u\n", limit);
        }
        return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);
static struct attribute *default_attrs[] = {
        &cpuinfo_min_freq.attr,
        &cpuinfo_max_freq.attr,
        &cpuinfo_transition_latency.attr,
        &scaling_min_freq.attr,
        &scaling_max_freq.attr,
        &affected_cpus.attr,
        &related_cpus.attr,
        &scaling_governor.attr,
        &scaling_driver.attr,
        &scaling_available_governors.attr,
        &scaling_setspeed.attr,
        NULL
};
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret;

        down_read(&policy->rwsem);

        if (fattr->show)
                ret = fattr->show(policy, buf);
        else
                ret = -EIO;

        up_read(&policy->rwsem);

        return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret = -EINVAL;

        get_online_cpus();

        if (!cpu_online(policy->cpu))
                goto unlock;

        down_write(&policy->rwsem);

        if (fattr->store)
                ret = fattr->store(policy, buf, count);
        else
                ret = -EIO;

        up_write(&policy->rwsem);
unlock:
        put_online_cpus();

        return ret;
}
static void cpufreq_sysfs_release(struct kobject *kobj)
{
        struct cpufreq_policy *policy = to_policy(kobj);

        pr_debug("last reference is dropped\n");
        complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
        .show   = show,
        .store  = store,
};

static struct kobj_type ktype_cpufreq = {
        .sysfs_ops      = &sysfs_ops,
        .default_attrs  = default_attrs,
        .release        = cpufreq_sysfs_release,
};
static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
{
        struct device *cpu_dev;

        pr_debug("%s: Adding symlink for CPU: %u\n", __func__, cpu);

        if (!policy)
                return 0;

        cpu_dev = get_cpu_device(cpu);
        if (WARN_ON(!cpu_dev))
                return 0;

        return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq");
}

static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
{
        struct device *cpu_dev;

        pr_debug("%s: Removing symlink for CPU: %u\n", __func__, cpu);

        cpu_dev = get_cpu_device(cpu);
        if (WARN_ON(!cpu_dev))
                return;

        sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
}

/* Add/remove symlinks for all related CPUs */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
        int ret = 0;
        unsigned int j;

        /* Some related CPUs might not be present (physically hotplugged) */
        for_each_cpu(j, policy->real_cpus) {
                ret = add_cpu_dev_symlink(policy, j);
                if (ret)
                        break;
        }

        return ret;
}

static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
{
        unsigned int j;

        /* Some related CPUs might not be present (physically hotplugged) */
        for_each_cpu(j, policy->real_cpus)
                remove_cpu_dev_symlink(policy, j);
}
static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
{
        struct freq_attr **drv_attr;
        int ret = 0;

        /* set up files for this cpu device */
        drv_attr = cpufreq_driver->attr;
        while (drv_attr && *drv_attr) {
                ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
                if (ret)
                        return ret;
                drv_attr++;
        }
        if (cpufreq_driver->get) {
                ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
                if (ret)
                        return ret;
        }

        ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
        if (ret)
                return ret;

        if (cpufreq_driver->bios_limit) {
                ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
                if (ret)
                        return ret;
        }

        return cpufreq_add_dev_symlink(policy);
}
__weak struct cpufreq_governor *cpufreq_default_governor(void)
{
        return NULL;
}
static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
        struct cpufreq_governor *gov = NULL;
        struct cpufreq_policy new_policy;

        memcpy(&new_policy, policy, sizeof(*policy));

        /* Update governor of new_policy to the governor used before hotplug */
        gov = find_governor(policy->last_governor);
        if (gov) {
                pr_debug("Restoring governor %s for cpu %d\n",
                         policy->governor->name, policy->cpu);
        } else {
                gov = cpufreq_default_governor();
                if (!gov)
                        return -ENODATA;
        }

        new_policy.governor = gov;

        /* Use the default policy if there is no last_policy. */
        if (cpufreq_driver->setpolicy) {
                if (policy->last_policy)
                        new_policy.policy = policy->last_policy;
                else
                        cpufreq_parse_governor(gov->name, &new_policy.policy,
                                               NULL);
        }
        /* set default policy */
        return cpufreq_set_policy(policy, &new_policy);
}
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
        int ret = 0;

        /* Has this CPU been taken care of already? */
        if (cpumask_test_cpu(cpu, policy->cpus))
                return 0;

        down_write(&policy->rwsem);
        if (has_target()) {
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
                if (ret) {
                        pr_err("%s: Failed to stop governor\n", __func__);
                        goto unlock;
                }
        }

        cpumask_set_cpu(cpu, policy->cpus);

        if (has_target()) {
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
                if (!ret)
                        ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

                if (ret)
                        pr_err("%s: Failed to start governor\n", __func__);
        }

unlock:
        up_write(&policy->rwsem);
        return ret;
}
static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
{
        struct device *dev = get_cpu_device(cpu);
        struct cpufreq_policy *policy;

        if (WARN_ON(!dev))
                return NULL;

        policy = kzalloc(sizeof(*policy), GFP_KERNEL);
        if (!policy)
                return NULL;

        if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
                goto err_free_policy;

        if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
                goto err_free_cpumask;

        if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
                goto err_free_rcpumask;

        kobject_init(&policy->kobj, &ktype_cpufreq);
        INIT_LIST_HEAD(&policy->policy_list);
        init_rwsem(&policy->rwsem);
        spin_lock_init(&policy->transition_lock);
        init_waitqueue_head(&policy->transition_wait);
        init_completion(&policy->kobj_unregister);
        INIT_WORK(&policy->update, handle_update);

        policy->cpu = cpu;
        return policy;

err_free_rcpumask:
        free_cpumask_var(policy->related_cpus);
err_free_cpumask:
        free_cpumask_var(policy->cpus);
err_free_policy:
        kfree(policy);

        return NULL;
}
static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
{
        struct kobject *kobj;
        struct completion *cmp;

        if (notify)
                blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                             CPUFREQ_REMOVE_POLICY, policy);

        down_write(&policy->rwsem);
        cpufreq_remove_dev_symlink(policy);
        kobj = &policy->kobj;
        cmp = &policy->kobj_unregister;
        up_write(&policy->rwsem);
        kobject_put(kobj);

        /*
         * We need to make sure that the underlying kobj is
         * actually not referenced anymore by anybody before we
         * proceed with unloading.
         */
        pr_debug("waiting for dropping of refcount\n");
        wait_for_completion(cmp);
        pr_debug("wait complete\n");
}
static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
{
        unsigned long flags;
        int cpu;

        /* Remove policy from list */
        write_lock_irqsave(&cpufreq_driver_lock, flags);
        list_del(&policy->policy_list);

        for_each_cpu(cpu, policy->related_cpus)
                per_cpu(cpufreq_cpu_data, cpu) = NULL;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        cpufreq_policy_put_kobj(policy, notify);
        free_cpumask_var(policy->real_cpus);
        free_cpumask_var(policy->related_cpus);
        free_cpumask_var(policy->cpus);
        kfree(policy);
}
static int cpufreq_online(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        bool new_policy;
        unsigned long flags;
        unsigned int j;
        int ret;

        pr_debug("%s: bringing CPU%u online\n", __func__, cpu);

        /* Check if this CPU already has a policy to manage it */
        policy = per_cpu(cpufreq_cpu_data, cpu);
        if (policy) {
                WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
                if (!policy_is_inactive(policy))
                        return cpufreq_add_policy_cpu(policy, cpu);

                /* This is the only online CPU for the policy. Start over. */
                new_policy = false;
                down_write(&policy->rwsem);
                policy->cpu = cpu;
                policy->governor = NULL;
                up_write(&policy->rwsem);
        } else {
                new_policy = true;
                policy = cpufreq_policy_alloc(cpu);
                if (!policy)
                        return -ENOMEM;
        }

        cpumask_copy(policy->cpus, cpumask_of(cpu));

        /* call driver. From then on the cpufreq must be able
         * to accept all calls to ->verify and ->setpolicy for this CPU
         */
        ret = cpufreq_driver->init(policy);
        if (ret) {
                pr_debug("initialization failed\n");
                goto out_free_policy;
        }

        down_write(&policy->rwsem);

        if (new_policy) {
                /* related_cpus should at least include policy->cpus. */
                cpumask_copy(policy->related_cpus, policy->cpus);
                /* Remember CPUs present at the policy creation time. */
                cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);

                /* Name and add the kobject */
                ret = kobject_add(&policy->kobj, cpufreq_global_kobject,
                                  "policy%u",
                                  cpumask_first(policy->related_cpus));
                if (ret) {
                        pr_err("%s: failed to add policy->kobj: %d\n", __func__,
                               ret);
                        goto out_exit_policy;
                }
        }

        /*
         * affected cpus must always be the one, which are online. We aren't
         * managing offline cpus here.
         */
        cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

        if (new_policy) {
                policy->user_policy.min = policy->min;
                policy->user_policy.max = policy->max;

                write_lock_irqsave(&cpufreq_driver_lock, flags);
                for_each_cpu(j, policy->related_cpus)
                        per_cpu(cpufreq_cpu_data, j) = policy;
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
        }

        if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
                policy->cur = cpufreq_driver->get(policy->cpu);
                if (!policy->cur) {
                        pr_err("%s: ->get() failed\n", __func__);
                        goto out_exit_policy;
                }
        }

        /*
         * Sometimes boot loaders set CPU frequency to a value outside of
         * frequency table present with cpufreq core. In such cases CPU might be
         * unstable if it has to run on that frequency for long duration of time
         * and so its better to set it to a frequency which is specified in
         * freq-table. This also makes cpufreq stats inconsistent as
         * cpufreq-stats would fail to register because current frequency of CPU
         * isn't found in freq-table.
         *
         * Because we don't want this change to effect boot process badly, we go
         * for the next freq which is >= policy->cur ('cur' must be set by now,
         * otherwise we will end up setting freq to lowest of the table as 'cur'
         * is initialized to zero).
         *
         * We are passing target-freq as "policy->cur - 1" otherwise
         * __cpufreq_driver_target() would simply fail, as policy->cur will be
         * equal to target-freq.
         */
        if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
            && has_target()) {
                /* Are we running at unknown frequency ? */
                ret = cpufreq_frequency_table_get_index(policy, policy->cur);
                if (ret == -EINVAL) {
                        /* Warn user and fix it */
                        pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
                                __func__, policy->cpu, policy->cur);
                        ret = __cpufreq_driver_target(policy, policy->cur - 1,
                                CPUFREQ_RELATION_L);

                        /*
                         * Reaching here after boot in a few seconds may not
                         * mean that system will remain stable at "unknown"
                         * frequency for longer duration. Hence, a BUG_ON().
                         */
                        BUG_ON(ret);
                        pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
                                __func__, policy->cpu, policy->cur);
                }
        }

        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                     CPUFREQ_START, policy);

        if (new_policy) {
                ret = cpufreq_add_dev_interface(policy);
                if (ret)
                        goto out_exit_policy;
                blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                CPUFREQ_CREATE_POLICY, policy);

                write_lock_irqsave(&cpufreq_driver_lock, flags);
                list_add(&policy->policy_list, &cpufreq_policy_list);
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
        }

        ret = cpufreq_init_policy(policy);
        if (ret) {
                pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
                       __func__, cpu, ret);
                /* cpufreq_policy_free() will notify based on this */
                new_policy = false;
                goto out_exit_policy;
        }

        up_write(&policy->rwsem);

        kobject_uevent(&policy->kobj, KOBJ_ADD);

        /* Callback for handling stuff after policy is ready */
        if (cpufreq_driver->ready)
                cpufreq_driver->ready(policy);

        pr_debug("initialization complete\n");

        return 0;

out_exit_policy:
        up_write(&policy->rwsem);

        if (cpufreq_driver->exit)
                cpufreq_driver->exit(policy);
out_free_policy:
        cpufreq_policy_free(policy, !new_policy);
        return ret;
}
/**
 * cpufreq_add_dev - the cpufreq interface for a CPU device.
 * @dev: CPU device.
 * @sif: Subsystem interface structure pointer (not used)
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
        unsigned cpu = dev->id;
        int ret;

        dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);

        if (cpu_online(cpu)) {
                ret = cpufreq_online(cpu);
        } else {
                /*
                 * A hotplug notifier will follow and we will handle it as CPU
                 * online then. For now, just create the sysfs link, unless
                 * there is no policy or the link is already present.
                 */
                struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

                ret = policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
                        ? add_cpu_dev_symlink(policy, cpu) : 0;
        }

        return ret;
}
static void cpufreq_offline(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        int ret;

        pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

        policy = cpufreq_cpu_get_raw(cpu);
        if (!policy) {
                pr_debug("%s: No cpu_data found\n", __func__);
                return;
        }

        down_write(&policy->rwsem);
        if (has_target()) {
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
                if (ret)
                        pr_err("%s: Failed to stop governor\n", __func__);
        }

        cpumask_clear_cpu(cpu, policy->cpus);

        if (policy_is_inactive(policy)) {
                if (has_target())
                        strncpy(policy->last_governor, policy->governor->name,
                                CPUFREQ_NAME_LEN);
                else
                        policy->last_policy = policy->policy;
        } else if (cpu == policy->cpu) {
                /* Nominate new CPU */
                policy->cpu = cpumask_any(policy->cpus);
        }

        /* Start governor again for active policy */
        if (!policy_is_inactive(policy)) {
                if (has_target()) {
                        ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
                        if (!ret)
                                ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

                        if (ret)
                                pr_err("%s: Failed to start governor\n", __func__);
                }

                goto unlock;
        }

        if (cpufreq_driver->stop_cpu)
                cpufreq_driver->stop_cpu(policy);

        /* If cpu is last user of policy, free policy */
        if (has_target()) {
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
                if (ret)
                        pr_err("%s: Failed to exit governor\n", __func__);
        }

        /*
         * Perform the ->exit() even during light-weight tear-down,
         * since this is a core component, and is essential for the
         * subsequent light-weight ->init() to succeed.
         */
        if (cpufreq_driver->exit) {
                cpufreq_driver->exit(policy);
                policy->freq_table = NULL;
        }

unlock:
        up_write(&policy->rwsem);
}
/**
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */
static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
        unsigned int cpu = dev->id;
        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

        if (!policy)
                return;

        if (cpu_online(cpu))
                cpufreq_offline(cpu);

        cpumask_clear_cpu(cpu, policy->real_cpus);
        remove_cpu_dev_symlink(policy, cpu);

        if (cpumask_empty(policy->real_cpus))
                cpufreq_policy_free(policy, true);
}
static void handle_update(struct work_struct *work)
{
        struct cpufreq_policy *policy =
                container_of(work, struct cpufreq_policy, update);
        unsigned int cpu = policy->cpu;

        pr_debug("handle_update for cpu %u called\n", cpu);
        cpufreq_update_policy(cpu);
}
/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
 *      in deep trouble.
 * @policy: policy managing CPUs
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to current frequency first, and need to clean up later.
 * So either call to cpufreq_update_policy() or schedule handle_update()).
 */
static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
                                unsigned int new_freq)
{
        struct cpufreq_freqs freqs;

        pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
                 policy->cur, new_freq);

        freqs.old = policy->cur;
        freqs.new = new_freq;

        cpufreq_freq_transition_begin(policy, &freqs);
        cpufreq_freq_transition_end(policy, &freqs, 0);
}
/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        unsigned int ret_freq = 0;

        if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
                return cpufreq_driver->get(cpu);

        policy = cpufreq_cpu_get(cpu);
        if (policy) {
                ret_freq = policy->cur;
                cpufreq_cpu_put(policy);
        }

        return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);

/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        unsigned int ret_freq = 0;

        if (policy) {
                ret_freq = policy->max;
                cpufreq_cpu_put(policy);
        }

        return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
        unsigned int ret_freq = 0;

        if (!cpufreq_driver->get)
                return ret_freq;

        ret_freq = cpufreq_driver->get(policy->cpu);

        /* Updating inactive policies is invalid, so avoid doing that. */
        if (unlikely(policy_is_inactive(policy)))
                return ret_freq;

        if (ret_freq && policy->cur &&
            !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
                /* verify no discrepancy between actual and
                   saved value exists */
                if (unlikely(ret_freq != policy->cur)) {
                        cpufreq_out_of_sync(policy, ret_freq);
                        schedule_work(&policy->update);
                }
        }

        return ret_freq;
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        unsigned int ret_freq = 0;

        if (policy) {
                down_read(&policy->rwsem);
                ret_freq = __cpufreq_get(policy);
                up_read(&policy->rwsem);

                cpufreq_cpu_put(policy);
        }

        return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
static struct subsys_interface cpufreq_interface = {
        .name           = "cpufreq",
        .subsys         = &cpu_subsys,
        .add_dev        = cpufreq_add_dev,
        .remove_dev     = cpufreq_remove_dev,
};
/*
 * In case the platform wants some specific frequency to be configured
 * during suspend.
 */
int cpufreq_generic_suspend(struct cpufreq_policy *policy)
{
        int ret;

        if (!policy->suspend_freq) {
                pr_debug("%s: suspend_freq not defined\n", __func__);
                return 0;
        }

        pr_debug("%s: Setting suspend-freq: %u\n", __func__,
                 policy->suspend_freq);

        ret = __cpufreq_driver_target(policy, policy->suspend_freq,
                                      CPUFREQ_RELATION_H);
        if (ret)
                pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
                       __func__, policy->suspend_freq, ret);

        return ret;
}
EXPORT_SYMBOL(cpufreq_generic_suspend);
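/*
 * Sketch of how a driver opts in (names are hypothetical): set
 * policy->suspend_freq in its ->init() and point .suspend at this helper.
 *
 *      static int my_cpufreq_init(struct cpufreq_policy *policy)
 *      {
 *              ...
 *              policy->suspend_freq = 800000;          // kHz
 *              return 0;
 *      }
 *
 *      static struct cpufreq_driver my_cpufreq_driver = {
 *              ...
 *              .suspend = cpufreq_generic_suspend,
 *      };
 */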
/**
 * cpufreq_suspend() - Suspend CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycles for suspending governors,
 * as some platforms can't change frequency after this point in the suspend
 * cycle: the devices (i2c, regulators, etc.) used for changing frequency are
 * suspended quickly after this point.
 */
void cpufreq_suspend(void)
{
        struct cpufreq_policy *policy;
        int ret;

        if (!cpufreq_driver)
                return;

        if (!has_target())
                goto suspend;

        pr_debug("%s: Suspending Governors\n", __func__);

        for_each_active_policy(policy) {
                down_write(&policy->rwsem);
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
                up_write(&policy->rwsem);

                if (ret)
                        pr_err("%s: Failed to stop governor for policy: %p\n",
                               __func__, policy);
                else if (cpufreq_driver->suspend
                    && cpufreq_driver->suspend(policy))
                        pr_err("%s: Failed to suspend driver: %p\n", __func__,
                               policy);
        }

suspend:
        cpufreq_suspended = true;
}
/**
 * cpufreq_resume() - Resume CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycle for resuming governors that
 * are suspended with cpufreq_suspend().
 */
void cpufreq_resume(void)
{
        struct cpufreq_policy *policy;
        int ret;

        if (!cpufreq_driver)
                return;

        cpufreq_suspended = false;

        if (!has_target())
                return;

        pr_debug("%s: Resuming Governors\n", __func__);

        for_each_active_policy(policy) {
                if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
                        pr_err("%s: Failed to resume driver: %p\n", __func__,
                               policy);
                } else {
                        down_write(&policy->rwsem);
                        ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
                        if (!ret)
                                __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
                        up_write(&policy->rwsem);

                        if (ret)
                                pr_err("%s: Failed to start governor for policy: %p\n",
                                       __func__, policy);
                }
        }

        /*
         * Schedule a call to cpufreq_update_policy() for the first-online CPU,
         * as that one won't be hotplugged out on suspend. It will verify that
         * the current freq is in sync with what we believe it to be.
         */
        policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
        if (WARN_ON(!policy))
                return;

        schedule_work(&policy->update);
}
/**
 * cpufreq_get_current_driver - return current driver's name
 *
 * Return the name string of the currently loaded cpufreq driver
 * or NULL, if none.
 */
const char *cpufreq_get_current_driver(void)
{
        if (cpufreq_driver)
                return cpufreq_driver->name;

        return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);

/**
 * cpufreq_get_driver_data - return current driver data
 *
 * Return the private data of the currently loaded cpufreq
 * driver, or NULL if no cpufreq driver is loaded.
 */
void *cpufreq_get_driver_data(void)
{
        if (cpufreq_driver)
                return cpufreq_driver->driver_data;

        return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
        int ret;

        if (cpufreq_disabled())
                return -EINVAL;

        WARN_ON(!init_cpufreq_transition_notifier_list_called);

        switch (list) {
        case CPUFREQ_TRANSITION_NOTIFIER:
                ret = srcu_notifier_chain_register(
                                &cpufreq_transition_notifier_list, nb);
                break;
        case CPUFREQ_POLICY_NOTIFIER:
                ret = blocking_notifier_chain_register(
                                &cpufreq_policy_notifier_list, nb);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);

/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
        int ret;

        if (cpufreq_disabled())
                return -EINVAL;

        switch (list) {
        case CPUFREQ_TRANSITION_NOTIFIER:
                ret = srcu_notifier_chain_unregister(
                                &cpufreq_transition_notifier_list, nb);
                break;
        case CPUFREQ_POLICY_NOTIFIER:
                ret = blocking_notifier_chain_unregister(
                                &cpufreq_policy_notifier_list, nb);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);
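/*
 * Client-side sketch (not part of this file): a driver that wants to react
 * to frequency transitions registers a notifier_block.  my_freq_notifier and
 * my_transition() are hypothetical names.
 *
 *      static int my_transition(struct notifier_block *nb, unsigned long val,
 *                               void *data)
 *      {
 *              struct cpufreq_freqs *freqs = data;
 *
 *              if (val == CPUFREQ_POSTCHANGE)
 *                      pr_debug("cpu%u now at %u kHz\n", freqs->cpu, freqs->new);
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block my_freq_notifier = {
 *              .notifier_call = my_transition,
 *      };
 *
 *      cpufreq_register_notifier(&my_freq_notifier, CPUFREQ_TRANSITION_NOTIFIER);
 *      ...
 *      cpufreq_unregister_notifier(&my_freq_notifier, CPUFREQ_TRANSITION_NOTIFIER);
 */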
/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/

/* Must set freqs->new to intermediate frequency */
static int __target_intermediate(struct cpufreq_policy *policy,
                                 struct cpufreq_freqs *freqs, int index)
{
        int ret;

        freqs->new = cpufreq_driver->get_intermediate(policy, index);

        /* We don't need to switch to intermediate freq */
        if (!freqs->new)
                return 0;

        pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
                 __func__, policy->cpu, freqs->old, freqs->new);

        cpufreq_freq_transition_begin(policy, freqs);
        ret = cpufreq_driver->target_intermediate(policy, index);
        cpufreq_freq_transition_end(policy, freqs, ret);

        if (ret)
                pr_err("%s: Failed to change to intermediate frequency: %d\n",
                       __func__, ret);

        return ret;
}
static int __target_index(struct cpufreq_policy *policy,
                          struct cpufreq_frequency_table *freq_table, int index)
{
        struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
        unsigned int intermediate_freq = 0;
        int retval = -EINVAL;
        bool notify;

        notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
        if (notify) {
                /* Handle switching to intermediate frequency */
                if (cpufreq_driver->get_intermediate) {
                        retval = __target_intermediate(policy, &freqs, index);
                        if (retval)
                                return retval;

                        intermediate_freq = freqs.new;
                        /* Set old freq to intermediate */
                        if (intermediate_freq)
                                freqs.old = freqs.new;
                }

                freqs.new = freq_table[index].frequency;
                pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
                         __func__, policy->cpu, freqs.old, freqs.new);

                cpufreq_freq_transition_begin(policy, &freqs);
        }

        retval = cpufreq_driver->target_index(policy, index);
        if (retval)
                pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
                       retval);

        if (notify) {
                cpufreq_freq_transition_end(policy, &freqs, retval);

                /*
                 * Failed after setting to intermediate freq? Driver should have
                 * reverted back to initial frequency and so should we. Check
                 * here for intermediate_freq instead of get_intermediate, in
                 * case we haven't switched to intermediate freq at all.
                 */
                if (unlikely(retval && intermediate_freq)) {
                        freqs.old = intermediate_freq;
                        freqs.new = policy->restore_freq;
                        cpufreq_freq_transition_begin(policy, &freqs);
                        cpufreq_freq_transition_end(policy, &freqs, 0);
                }
        }

        return retval;
}
int __cpufreq_driver_target(struct cpufreq_policy *policy,
                            unsigned int target_freq,
                            unsigned int relation)
{
        unsigned int old_target_freq = target_freq;
        int retval = -EINVAL;

        if (cpufreq_disabled())
                return -ENODEV;

        /* Make sure that target_freq is within supported range */
        if (target_freq > policy->max)
                target_freq = policy->max;
        if (target_freq < policy->min)
                target_freq = policy->min;

        pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
                 policy->cpu, target_freq, relation, old_target_freq);

        /*
         * This might look like a redundant call as we are checking it again
         * after finding index. But it is left intentionally for cases where
         * exactly same freq is called again and so we can save on few function
         * calls.
         */
        if (target_freq == policy->cur)
                return 0;

        /* Save last value to restore later on errors */
        policy->restore_freq = policy->cur;

        if (cpufreq_driver->target)
                retval = cpufreq_driver->target(policy, target_freq, relation);
        else if (cpufreq_driver->target_index) {
                struct cpufreq_frequency_table *freq_table;
                int index;

                freq_table = cpufreq_frequency_get_table(policy->cpu);
                if (unlikely(!freq_table)) {
                        pr_err("%s: Unable to find freq_table\n", __func__);
                        goto out;
                }

                retval = cpufreq_frequency_table_target(policy, freq_table,
                                target_freq, relation, &index);
                if (unlikely(retval)) {
                        pr_err("%s: Unable to find matching freq\n", __func__);
                        goto out;
                }

                if (freq_table[index].frequency == policy->cur) {
                        retval = 0;
                        goto out;
                }

                retval = __target_index(policy, freq_table, index);
        }

out:
        return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
int cpufreq_driver_target(struct cpufreq_policy *policy,
                          unsigned int target_freq,
                          unsigned int relation)
{
        int ret;

        down_write(&policy->rwsem);

        ret = __cpufreq_driver_target(policy, target_freq, relation);

        up_write(&policy->rwsem);

        return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);

__weak struct cpufreq_governor *cpufreq_fallback_governor(void)
{
        return NULL;
}
static int __cpufreq_governor(struct cpufreq_policy *policy,
                              unsigned int event)
{
        int ret;

        /* Don't start any governor operations if we are entering suspend */
        if (cpufreq_suspended)
                return 0;
        /*
         * Governor might not be initiated here if ACPI _PPC changed
         * notification happened, so check it.
         */
        if (!policy->governor)
                return -EINVAL;

        if (policy->governor->max_transition_latency &&
            policy->cpuinfo.transition_latency >
            policy->governor->max_transition_latency) {
                struct cpufreq_governor *gov = cpufreq_fallback_governor();

                if (gov) {
                        pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
                                policy->governor->name, gov->name);
                        policy->governor = gov;
                } else {
                        return -EINVAL;
                }
        }

        if (event == CPUFREQ_GOV_POLICY_INIT)
                if (!try_module_get(policy->governor->owner))
                        return -EINVAL;

        pr_debug("%s: for CPU %u, event %u\n", __func__, policy->cpu, event);

        mutex_lock(&cpufreq_governor_lock);
        if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
            || (!policy->governor_enabled
            && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
                mutex_unlock(&cpufreq_governor_lock);
                return -EBUSY;
        }

        if (event == CPUFREQ_GOV_STOP)
                policy->governor_enabled = false;
        else if (event == CPUFREQ_GOV_START)
                policy->governor_enabled = true;

        mutex_unlock(&cpufreq_governor_lock);

        ret = policy->governor->governor(policy, event);

        if (!ret) {
                if (event == CPUFREQ_GOV_POLICY_INIT)
                        policy->governor->initialized++;
                else if (event == CPUFREQ_GOV_POLICY_EXIT)
                        policy->governor->initialized--;
        } else {
                /* Restore original values */
                mutex_lock(&cpufreq_governor_lock);
                if (event == CPUFREQ_GOV_STOP)
                        policy->governor_enabled = true;
                else if (event == CPUFREQ_GOV_START)
                        policy->governor_enabled = false;
                mutex_unlock(&cpufreq_governor_lock);
        }

        if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
            ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
                module_put(policy->governor->owner);

        return ret;
}
*governor
)
2064 if (cpufreq_disabled())
2067 mutex_lock(&cpufreq_governor_mutex
);
2069 governor
->initialized
= 0;
2071 if (!find_governor(governor
->name
)) {
2073 list_add(&governor
->governor_list
, &cpufreq_governor_list
);
2076 mutex_unlock(&cpufreq_governor_mutex
);
2079 EXPORT_SYMBOL_GPL(cpufreq_register_governor
);
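/*
 * Registration sketch from a governor module's point of view (hypothetical
 * names, modeled on the in-tree governors):
 *
 *      static struct cpufreq_governor cpufreq_gov_mygov = {
 *              .name     = "mygov",
 *              .governor = mygov_governor,     // handles the GOV_* events
 *              .owner    = THIS_MODULE,
 *      };
 *
 *      static int __init mygov_init(void)
 *      {
 *              return cpufreq_register_governor(&cpufreq_gov_mygov);
 *      }
 *
 *      static void __exit mygov_exit(void)
 *      {
 *              cpufreq_unregister_governor(&cpufreq_gov_mygov);
 *      }
 */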
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
        struct cpufreq_policy *policy;
        unsigned long flags;

        if (!governor)
                return;

        if (cpufreq_disabled())
                return;

        /* clear last_governor for all inactive policies */
        read_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_inactive_policy(policy) {
                if (!strcmp(policy->last_governor, governor->name)) {
                        policy->governor = NULL;
                        strcpy(policy->last_governor, "\0");
                }
        }
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        mutex_lock(&cpufreq_governor_mutex);
        list_del(&governor->governor_list);
        mutex_unlock(&cpufreq_governor_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *      is written
 * @cpu: CPU to find the policy for
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
        struct cpufreq_policy *cpu_policy;

        if (!policy)
                return -EINVAL;

        cpu_policy = cpufreq_cpu_get(cpu);
        if (!cpu_policy)
                return -EINVAL;

        memcpy(policy, cpu_policy, sizeof(*policy));

        cpufreq_cpu_put(cpu_policy);
        return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
/*
 * policy : current policy.
 * new_policy: policy to be set.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
                              struct cpufreq_policy *new_policy)
{
        struct cpufreq_governor *old_gov;
        int ret;

        pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
                 new_policy->cpu, new_policy->min, new_policy->max);

        memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

        /*
         * This check works well when we store new min/max freq attributes,
         * because new_policy is a copy of policy with one field updated.
         */
        if (new_policy->min > new_policy->max)
                return -EINVAL;

        /* verify the cpu speed can be set within this limit */
        ret = cpufreq_driver->verify(new_policy);
        if (ret)
                return ret;

        /* adjust if necessary - all reasons */
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                        CPUFREQ_ADJUST, new_policy);

        /*
         * verify the cpu speed can be set within this limit, which might be
         * different to the first one
         */
        ret = cpufreq_driver->verify(new_policy);
        if (ret)
                return ret;

        /* notification of the new policy */
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                        CPUFREQ_NOTIFY, new_policy);

        policy->min = new_policy->min;
        policy->max = new_policy->max;

        pr_debug("new min and max freqs are %u - %u kHz\n",
                 policy->min, policy->max);

        if (cpufreq_driver->setpolicy) {
                policy->policy = new_policy->policy;
                pr_debug("setting range\n");
                return cpufreq_driver->setpolicy(new_policy);
        }

        if (new_policy->governor == policy->governor)
                goto out;

        pr_debug("governor switch\n");

        /* save old, working values */
        old_gov = policy->governor;
        /* end old governor */
        if (old_gov) {
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
                if (ret) {
                        /* This can happen due to race with other operations */
                        pr_debug("%s: Failed to Stop Governor: %s (%d)\n",
                                 __func__, old_gov->name, ret);
                        return ret;
                }

                ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
                if (ret) {
                        pr_err("%s: Failed to Exit Governor: %s (%d)\n",
                               __func__, old_gov->name, ret);
                        return ret;
                }
        }

        /* start new governor */
        policy->governor = new_policy->governor;
        ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
        if (!ret) {
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
                if (!ret)
                        goto out;

                __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
        }

        /* new governor failed, so re-start old one */
        pr_debug("starting governor %s failed\n", policy->governor->name);
        if (old_gov) {
                policy->governor = old_gov;
                if (__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
                        policy->governor = NULL;
                else
                        __cpufreq_governor(policy, CPUFREQ_GOV_START);
        }

        return ret;

out:
        pr_debug("governor: change or update limits\n");
        return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
}
/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different requirements
 * at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cpufreq_policy new_policy;
	int ret;

	if (!policy)
		return -ENODEV;

	down_write(&policy->rwsem);

	pr_debug("updating policy for CPU %u\n", cpu);
	memcpy(&new_policy, policy, sizeof(*policy));
	new_policy.min = policy->user_policy.min;
	new_policy.max = policy->user_policy.max;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
		new_policy.cur = cpufreq_driver->get(cpu);
		if (WARN_ON(!new_policy.cur)) {
			ret = -EIO;
			goto unlock;
		}

		if (!policy->cur) {
			pr_debug("Driver did not initialize current freq\n");
			policy->cur = new_policy.cur;
		} else {
			if (policy->cur != new_policy.cur && has_target())
				cpufreq_out_of_sync(policy, new_policy.cur);
		}
	}

	ret = cpufreq_set_policy(policy, &new_policy);

unlock:
	up_write(&policy->rwsem);

	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
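
/*
 * Illustrative sketch only, not part of the original file: platform code
 * that learns out of band (e.g. from firmware) that the allowed frequency
 * range changed can ask the core to re-evaluate the policy.  The handler
 * name is hypothetical.
 */
static void __maybe_unused example_firmware_limits_changed(unsigned int cpu)
{
	/*
	 * Re-reads the user limits, queries the driver for the current
	 * frequency and runs the result through cpufreq_set_policy().
	 */
	cpufreq_update_policy(cpu);
}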
static int cpufreq_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		cpufreq_online(cpu);
		break;

	case CPU_DOWN_PREPARE:
		cpufreq_offline(cpu);
		break;

	case CPU_DOWN_FAILED:
		cpufreq_online(cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
/*********************************************************************
 *                              BOOST                                *
 *********************************************************************/
static int cpufreq_boost_set_sw(int state)
{
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_policy *policy;
	int ret = -EINVAL;

	for_each_active_policy(policy) {
		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (freq_table) {
			ret = cpufreq_frequency_table_cpuinfo(policy,
							      freq_table);
			if (ret) {
				pr_err("%s: Policy frequency update failed\n",
				       __func__);
				break;
			}

			down_write(&policy->rwsem);
			policy->user_policy.max = policy->max;
			__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
			up_write(&policy->rwsem);
		}
	}

	return ret;
}
int cpufreq_boost_trigger_state(int state)
{
	unsigned long flags;
	int ret = 0;

	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_driver->set_boost(state);
	if (ret) {
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		cpufreq_driver->boost_enabled = !state;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		pr_err("%s: Cannot %s BOOST\n",
		       __func__, state ? "enable" : "disable");
	}

	return ret;
}
static bool cpufreq_boost_supported(void)
{
	return likely(cpufreq_driver) && cpufreq_driver->set_boost;
}
static int create_boost_sysfs_file(void)
{
	int ret;

	ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
	if (ret)
		pr_err("%s: cannot register global BOOST sysfs file\n",
		       __func__);

	return ret;
}
static void remove_boost_sysfs_file(void)
{
	if (cpufreq_boost_supported())
		sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
}
int cpufreq_enable_boost_support(void)
{
	if (!cpufreq_driver)
		return -EINVAL;

	if (cpufreq_boost_supported())
		return 0;

	cpufreq_driver->set_boost = cpufreq_boost_set_sw;

	/* This will get removed on driver unregister */
	return create_boost_sysfs_file();
}
EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
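
/*
 * Illustrative sketch only, not part of the original file: a driver whose
 * frequency table carries boost entries but which has no boost control of
 * its own can opt into the generic software boost handling
 * (cpufreq_boost_set_sw) from its ->init() callback.  The function name
 * is hypothetical.
 */
static int __maybe_unused example_driver_init_boost(struct cpufreq_policy *policy)
{
	int ret;

	/*
	 * Installs cpufreq_boost_set_sw() as ->set_boost and creates the
	 * global "boost" sysfs attribute if boost was not supported yet.
	 */
	ret = cpufreq_enable_boost_support();
	if (ret)
		pr_warn("could not enable boost support (%d)\n", ret);

	return 0;
}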
/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver with this core code. Returns zero on
 * success, -EEXIST when another driver got here first (and isn't
 * unregistered in the meantime).
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
		    driver_data->target) ||
	     (driver_data->setpolicy && (driver_data->target_index ||
		    driver_data->target)) ||
	     (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	/* Protect against concurrent CPU online/offline. */
	get_online_cpus();

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		ret = -EEXIST;
		goto out;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	if (cpufreq_boost_supported()) {
		ret = create_boost_sysfs_file();
		if (ret)
			goto err_null_driver;
	}

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
	    list_empty(&cpufreq_policy_list)) {
		/* if all ->init() calls failed, unregister */
		ret = -ENODEV;
		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
			 driver_data->name);
		goto err_if_unreg;
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);
	goto out;

err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	remove_boost_sysfs_file();
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
out:
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
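
/*
 * Illustrative sketch only, not part of the original file: the smallest
 * driver shape that passes the validity checks above -- ->verify, ->init
 * and exactly one of ->setpolicy / ->target_index / ->target.  All names
 * and frequencies below are hypothetical; a real module would point
 * module_init() at example_register().
 */
static struct cpufreq_frequency_table example_freq_table[] = {
	{ .frequency = 500000 },		/* kHz */
	{ .frequency = 1000000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int example_init(struct cpufreq_policy *policy)
{
	/* Validate the table and expose it through the policy */
	return cpufreq_table_validate_and_show(policy, example_freq_table);
}

static int example_target_index(struct cpufreq_policy *policy,
				unsigned int index)
{
	/* Program the hardware to example_freq_table[index].frequency here */
	return 0;
}

static struct cpufreq_driver example_driver = {
	.name		= "example",
	.verify		= cpufreq_generic_frequency_table_verify,
	.init		= example_init,
	.target_index	= example_target_index,
};

static int __init example_register(void)
{
	return cpufreq_register_driver(&example_driver);
}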
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 * @driver: the driver being unregistered
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	/* Protect against concurrent cpu hotplug */
	get_online_cpus();
	subsys_interface_unregister(&cpufreq_interface);
	remove_boost_sysfs_file();
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	put_online_cpus();

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
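
/*
 * Illustrative sketch only, not part of the original file: a modular
 * driver pairs the registration shown earlier with an exit hook wired to
 * module_exit().  The names match the hypothetical sketch after
 * cpufreq_register_driver().
 */
static void __maybe_unused example_unregister(void)
{
	cpufreq_unregister_driver(&example_driver);
}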
/*
 * Stop cpufreq at shutdown to make sure it isn't holding any locks
 * or mutexes when secondary CPUs are halted.
 */
static struct syscore_ops cpufreq_syscore_ops = {
	.shutdown = cpufreq_suspend,
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);
static int __init cpufreq_core_init(void)
{
	if (cpufreq_disabled())
		return -ENODEV;

	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
	BUG_ON(!cpufreq_global_kobject);

	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);