2 * linux/drivers/cpufreq/cpufreq.c
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
9 * Added handling for CPU hotplug
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/cpu.h>
21 #include <linux/cpufreq.h>
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/init.h>
25 #include <linux/kernel_stat.h>
26 #include <linux/module.h>
27 #include <linux/mutex.h>
28 #include <linux/slab.h>
29 #include <linux/suspend.h>
30 #include <linux/syscore_ops.h>
31 #include <linux/tick.h>
32 #include <trace/events/power.h>
34 static LIST_HEAD(cpufreq_policy_list
);
36 static inline bool policy_is_inactive(struct cpufreq_policy
*policy
)
38 return cpumask_empty(policy
->cpus
);
41 static bool suitable_policy(struct cpufreq_policy
*policy
, bool active
)
43 return active
== !policy_is_inactive(policy
);
46 /* Finds Next Acive/Inactive policy */
47 static struct cpufreq_policy
*next_policy(struct cpufreq_policy
*policy
,
51 policy
= list_next_entry(policy
, policy_list
);
53 /* No more policies in the list */
54 if (&policy
->policy_list
== &cpufreq_policy_list
)
56 } while (!suitable_policy(policy
, active
));
61 static struct cpufreq_policy
*first_policy(bool active
)
63 struct cpufreq_policy
*policy
;
65 /* No policies in the list */
66 if (list_empty(&cpufreq_policy_list
))
69 policy
= list_first_entry(&cpufreq_policy_list
, typeof(*policy
),
72 if (!suitable_policy(policy
, active
))
73 policy
= next_policy(policy
, active
);
78 /* Macros to iterate over CPU policies */
79 #define for_each_suitable_policy(__policy, __active) \
80 for (__policy = first_policy(__active); \
82 __policy = next_policy(__policy, __active))
84 #define for_each_active_policy(__policy) \
85 for_each_suitable_policy(__policy, true)
86 #define for_each_inactive_policy(__policy) \
87 for_each_suitable_policy(__policy, false)
89 #define for_each_policy(__policy) \
90 list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)
92 /* Iterate over governors */
93 static LIST_HEAD(cpufreq_governor_list
);
94 #define for_each_governor(__governor) \
95 list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
98 * The "cpufreq driver" - the arch- or hardware-dependent low
99 * level driver of CPUFreq support, and its spinlock. This lock
100 * also protects the cpufreq_cpu_data array.
102 static struct cpufreq_driver
*cpufreq_driver
;
103 static DEFINE_PER_CPU(struct cpufreq_policy
*, cpufreq_cpu_data
);
104 static DEFINE_RWLOCK(cpufreq_driver_lock
);
105 DEFINE_MUTEX(cpufreq_governor_lock
);
107 /* Flag to suspend/resume CPUFreq governors */
108 static bool cpufreq_suspended
;
110 static inline bool has_target(void)
112 return cpufreq_driver
->target_index
|| cpufreq_driver
->target
;
115 /* internal prototypes */
116 static int __cpufreq_governor(struct cpufreq_policy
*policy
,
118 static unsigned int __cpufreq_get(struct cpufreq_policy
*policy
);
119 static void handle_update(struct work_struct
*work
);
122 * Two notifier lists: the "policy" list is involved in the
123 * validation process for a new CPU frequency policy; the
124 * "transition" list for kernel code that needs to handle
125 * changes to devices when the CPU clock speed changes.
126 * The mutex locks both lists.
128 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list
);
129 static struct srcu_notifier_head cpufreq_transition_notifier_list
;
131 static bool init_cpufreq_transition_notifier_list_called
;
132 static int __init
init_cpufreq_transition_notifier_list(void)
134 srcu_init_notifier_head(&cpufreq_transition_notifier_list
);
135 init_cpufreq_transition_notifier_list_called
= true;
138 pure_initcall(init_cpufreq_transition_notifier_list
);
140 static int off __read_mostly
;
141 static int cpufreq_disabled(void)
145 void disable_cpufreq(void)
149 static DEFINE_MUTEX(cpufreq_governor_mutex
);
151 bool have_governor_per_policy(void)
153 return !!(cpufreq_driver
->flags
& CPUFREQ_HAVE_GOVERNOR_PER_POLICY
);
155 EXPORT_SYMBOL_GPL(have_governor_per_policy
);
157 struct kobject
*get_governor_parent_kobj(struct cpufreq_policy
*policy
)
159 if (have_governor_per_policy())
160 return &policy
->kobj
;
162 return cpufreq_global_kobject
;
164 EXPORT_SYMBOL_GPL(get_governor_parent_kobj
);
166 struct cpufreq_frequency_table
*cpufreq_frequency_get_table(unsigned int cpu
)
168 struct cpufreq_policy
*policy
= per_cpu(cpufreq_cpu_data
, cpu
);
170 return policy
&& !policy_is_inactive(policy
) ?
171 policy
->freq_table
: NULL
;
173 EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table
);
175 static inline u64
get_cpu_idle_time_jiffy(unsigned int cpu
, u64
*wall
)
181 cur_wall_time
= jiffies64_to_cputime64(get_jiffies_64());
183 busy_time
= kcpustat_cpu(cpu
).cpustat
[CPUTIME_USER
];
184 busy_time
+= kcpustat_cpu(cpu
).cpustat
[CPUTIME_SYSTEM
];
185 busy_time
+= kcpustat_cpu(cpu
).cpustat
[CPUTIME_IRQ
];
186 busy_time
+= kcpustat_cpu(cpu
).cpustat
[CPUTIME_SOFTIRQ
];
187 busy_time
+= kcpustat_cpu(cpu
).cpustat
[CPUTIME_STEAL
];
188 busy_time
+= kcpustat_cpu(cpu
).cpustat
[CPUTIME_NICE
];
190 idle_time
= cur_wall_time
- busy_time
;
192 *wall
= cputime_to_usecs(cur_wall_time
);
194 return cputime_to_usecs(idle_time
);
197 u64
get_cpu_idle_time(unsigned int cpu
, u64
*wall
, int io_busy
)
199 u64 idle_time
= get_cpu_idle_time_us(cpu
, io_busy
? wall
: NULL
);
201 if (idle_time
== -1ULL)
202 return get_cpu_idle_time_jiffy(cpu
, wall
);
204 idle_time
+= get_cpu_iowait_time_us(cpu
, wall
);
208 EXPORT_SYMBOL_GPL(get_cpu_idle_time
);
211 * This is a generic cpufreq init() routine which can be used by cpufreq
212 * drivers of SMP systems. It will do following:
213 * - validate & show freq table passed
214 * - set policies transition latency
215 * - policy->cpus with all possible CPUs
217 int cpufreq_generic_init(struct cpufreq_policy
*policy
,
218 struct cpufreq_frequency_table
*table
,
219 unsigned int transition_latency
)
223 ret
= cpufreq_table_validate_and_show(policy
, table
);
225 pr_err("%s: invalid frequency table: %d\n", __func__
, ret
);
229 policy
->cpuinfo
.transition_latency
= transition_latency
;
232 * The driver only supports the SMP configuration where all processors
233 * share the clock and voltage and clock.
235 cpumask_setall(policy
->cpus
);
239 EXPORT_SYMBOL_GPL(cpufreq_generic_init
);
241 struct cpufreq_policy
*cpufreq_cpu_get_raw(unsigned int cpu
)
243 struct cpufreq_policy
*policy
= per_cpu(cpufreq_cpu_data
, cpu
);
245 return policy
&& cpumask_test_cpu(cpu
, policy
->cpus
) ? policy
: NULL
;
247 EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw
);
249 unsigned int cpufreq_generic_get(unsigned int cpu
)
251 struct cpufreq_policy
*policy
= cpufreq_cpu_get_raw(cpu
);
253 if (!policy
|| IS_ERR(policy
->clk
)) {
254 pr_err("%s: No %s associated to cpu: %d\n",
255 __func__
, policy
? "clk" : "policy", cpu
);
259 return clk_get_rate(policy
->clk
) / 1000;
261 EXPORT_SYMBOL_GPL(cpufreq_generic_get
);
264 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
266 * @cpu: cpu to find policy for.
268 * This returns policy for 'cpu', returns NULL if it doesn't exist.
269 * It also increments the kobject reference count to mark it busy and so would
270 * require a corresponding call to cpufreq_cpu_put() to decrement it back.
271 * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be
272 * freed as that depends on the kobj count.
274 * Return: A valid policy on success, otherwise NULL on failure.
276 struct cpufreq_policy
*cpufreq_cpu_get(unsigned int cpu
)
278 struct cpufreq_policy
*policy
= NULL
;
281 if (WARN_ON(cpu
>= nr_cpu_ids
))
284 /* get the cpufreq driver */
285 read_lock_irqsave(&cpufreq_driver_lock
, flags
);
287 if (cpufreq_driver
) {
289 policy
= cpufreq_cpu_get_raw(cpu
);
291 kobject_get(&policy
->kobj
);
294 read_unlock_irqrestore(&cpufreq_driver_lock
, flags
);
298 EXPORT_SYMBOL_GPL(cpufreq_cpu_get
);
301 * cpufreq_cpu_put: Decrements the usage count of a policy
303 * @policy: policy earlier returned by cpufreq_cpu_get().
305 * This decrements the kobject reference count incremented earlier by calling
308 void cpufreq_cpu_put(struct cpufreq_policy
*policy
)
310 kobject_put(&policy
->kobj
);
312 EXPORT_SYMBOL_GPL(cpufreq_cpu_put
);
314 /*********************************************************************
315 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
316 *********************************************************************/
319 * adjust_jiffies - adjust the system "loops_per_jiffy"
321 * This function alters the system "loops_per_jiffy" for the clock
322 * speed change. Note that loops_per_jiffy cannot be updated on SMP
323 * systems as each CPU might be scaled differently. So, use the arch
324 * per-CPU loops_per_jiffy value wherever possible.
326 static void adjust_jiffies(unsigned long val
, struct cpufreq_freqs
*ci
)
329 static unsigned long l_p_j_ref
;
330 static unsigned int l_p_j_ref_freq
;
332 if (ci
->flags
& CPUFREQ_CONST_LOOPS
)
335 if (!l_p_j_ref_freq
) {
336 l_p_j_ref
= loops_per_jiffy
;
337 l_p_j_ref_freq
= ci
->old
;
338 pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
339 l_p_j_ref
, l_p_j_ref_freq
);
341 if (val
== CPUFREQ_POSTCHANGE
&& ci
->old
!= ci
->new) {
342 loops_per_jiffy
= cpufreq_scale(l_p_j_ref
, l_p_j_ref_freq
,
344 pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
345 loops_per_jiffy
, ci
->new);
350 static void __cpufreq_notify_transition(struct cpufreq_policy
*policy
,
351 struct cpufreq_freqs
*freqs
, unsigned int state
)
353 BUG_ON(irqs_disabled());
355 if (cpufreq_disabled())
358 freqs
->flags
= cpufreq_driver
->flags
;
359 pr_debug("notification %u of frequency transition to %u kHz\n",
364 case CPUFREQ_PRECHANGE
:
365 /* detect if the driver reported a value as "old frequency"
366 * which is not equal to what the cpufreq core thinks is
369 if (!(cpufreq_driver
->flags
& CPUFREQ_CONST_LOOPS
)) {
370 if ((policy
) && (policy
->cpu
== freqs
->cpu
) &&
371 (policy
->cur
) && (policy
->cur
!= freqs
->old
)) {
372 pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
373 freqs
->old
, policy
->cur
);
374 freqs
->old
= policy
->cur
;
377 srcu_notifier_call_chain(&cpufreq_transition_notifier_list
,
378 CPUFREQ_PRECHANGE
, freqs
);
379 adjust_jiffies(CPUFREQ_PRECHANGE
, freqs
);
382 case CPUFREQ_POSTCHANGE
:
383 adjust_jiffies(CPUFREQ_POSTCHANGE
, freqs
);
384 pr_debug("FREQ: %lu - CPU: %lu\n",
385 (unsigned long)freqs
->new, (unsigned long)freqs
->cpu
);
386 trace_cpu_frequency(freqs
->new, freqs
->cpu
);
387 srcu_notifier_call_chain(&cpufreq_transition_notifier_list
,
388 CPUFREQ_POSTCHANGE
, freqs
);
389 if (likely(policy
) && likely(policy
->cpu
== freqs
->cpu
))
390 policy
->cur
= freqs
->new;
396 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
397 * on frequency transition.
399 * This function calls the transition notifiers and the "adjust_jiffies"
400 * function. It is called twice on all CPU frequency changes that have
403 static void cpufreq_notify_transition(struct cpufreq_policy
*policy
,
404 struct cpufreq_freqs
*freqs
, unsigned int state
)
406 for_each_cpu(freqs
->cpu
, policy
->cpus
)
407 __cpufreq_notify_transition(policy
, freqs
, state
);
410 /* Do post notifications when there are chances that transition has failed */
411 static void cpufreq_notify_post_transition(struct cpufreq_policy
*policy
,
412 struct cpufreq_freqs
*freqs
, int transition_failed
)
414 cpufreq_notify_transition(policy
, freqs
, CPUFREQ_POSTCHANGE
);
415 if (!transition_failed
)
418 swap(freqs
->old
, freqs
->new);
419 cpufreq_notify_transition(policy
, freqs
, CPUFREQ_PRECHANGE
);
420 cpufreq_notify_transition(policy
, freqs
, CPUFREQ_POSTCHANGE
);
423 void cpufreq_freq_transition_begin(struct cpufreq_policy
*policy
,
424 struct cpufreq_freqs
*freqs
)
428 * Catch double invocations of _begin() which lead to self-deadlock.
429 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
430 * doesn't invoke _begin() on their behalf, and hence the chances of
431 * double invocations are very low. Moreover, there are scenarios
432 * where these checks can emit false-positive warnings in these
433 * drivers; so we avoid that by skipping them altogether.
435 WARN_ON(!(cpufreq_driver
->flags
& CPUFREQ_ASYNC_NOTIFICATION
)
436 && current
== policy
->transition_task
);
439 wait_event(policy
->transition_wait
, !policy
->transition_ongoing
);
441 spin_lock(&policy
->transition_lock
);
443 if (unlikely(policy
->transition_ongoing
)) {
444 spin_unlock(&policy
->transition_lock
);
448 policy
->transition_ongoing
= true;
449 policy
->transition_task
= current
;
451 spin_unlock(&policy
->transition_lock
);
453 cpufreq_notify_transition(policy
, freqs
, CPUFREQ_PRECHANGE
);
455 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin
);
457 void cpufreq_freq_transition_end(struct cpufreq_policy
*policy
,
458 struct cpufreq_freqs
*freqs
, int transition_failed
)
460 if (unlikely(WARN_ON(!policy
->transition_ongoing
)))
463 cpufreq_notify_post_transition(policy
, freqs
, transition_failed
);
465 policy
->transition_ongoing
= false;
466 policy
->transition_task
= NULL
;
468 wake_up(&policy
->transition_wait
);
470 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end
);
473 /*********************************************************************
475 *********************************************************************/
476 static ssize_t
show_boost(struct kobject
*kobj
,
477 struct attribute
*attr
, char *buf
)
479 return sprintf(buf
, "%d\n", cpufreq_driver
->boost_enabled
);
482 static ssize_t
store_boost(struct kobject
*kobj
, struct attribute
*attr
,
483 const char *buf
, size_t count
)
487 ret
= sscanf(buf
, "%d", &enable
);
488 if (ret
!= 1 || enable
< 0 || enable
> 1)
491 if (cpufreq_boost_trigger_state(enable
)) {
492 pr_err("%s: Cannot %s BOOST!\n",
493 __func__
, enable
? "enable" : "disable");
497 pr_debug("%s: cpufreq BOOST %s\n",
498 __func__
, enable
? "enabled" : "disabled");
502 define_one_global_rw(boost
);
504 static struct cpufreq_governor
*find_governor(const char *str_governor
)
506 struct cpufreq_governor
*t
;
509 if (!strncasecmp(str_governor
, t
->name
, CPUFREQ_NAME_LEN
))
516 * cpufreq_parse_governor - parse a governor string
518 static int cpufreq_parse_governor(char *str_governor
, unsigned int *policy
,
519 struct cpufreq_governor
**governor
)
523 if (cpufreq_driver
->setpolicy
) {
524 if (!strncasecmp(str_governor
, "performance", CPUFREQ_NAME_LEN
)) {
525 *policy
= CPUFREQ_POLICY_PERFORMANCE
;
527 } else if (!strncasecmp(str_governor
, "powersave",
529 *policy
= CPUFREQ_POLICY_POWERSAVE
;
533 struct cpufreq_governor
*t
;
535 mutex_lock(&cpufreq_governor_mutex
);
537 t
= find_governor(str_governor
);
542 mutex_unlock(&cpufreq_governor_mutex
);
543 ret
= request_module("cpufreq_%s", str_governor
);
544 mutex_lock(&cpufreq_governor_mutex
);
547 t
= find_governor(str_governor
);
555 mutex_unlock(&cpufreq_governor_mutex
);
561 * cpufreq_per_cpu_attr_read() / show_##file_name() -
562 * print out cpufreq information
564 * Write out information from cpufreq_driver->policy[cpu]; object must be
568 #define show_one(file_name, object) \
569 static ssize_t show_##file_name \
570 (struct cpufreq_policy *policy, char *buf) \
572 return sprintf(buf, "%u\n", policy->object); \
575 show_one(cpuinfo_min_freq
, cpuinfo
.min_freq
);
576 show_one(cpuinfo_max_freq
, cpuinfo
.max_freq
);
577 show_one(cpuinfo_transition_latency
, cpuinfo
.transition_latency
);
578 show_one(scaling_min_freq
, min
);
579 show_one(scaling_max_freq
, max
);
581 static ssize_t
show_scaling_cur_freq(struct cpufreq_policy
*policy
, char *buf
)
585 if (cpufreq_driver
&& cpufreq_driver
->setpolicy
&& cpufreq_driver
->get
)
586 ret
= sprintf(buf
, "%u\n", cpufreq_driver
->get(policy
->cpu
));
588 ret
= sprintf(buf
, "%u\n", policy
->cur
);
592 static int cpufreq_set_policy(struct cpufreq_policy
*policy
,
593 struct cpufreq_policy
*new_policy
);
596 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
598 #define store_one(file_name, object) \
599 static ssize_t store_##file_name \
600 (struct cpufreq_policy *policy, const char *buf, size_t count) \
603 struct cpufreq_policy new_policy; \
605 memcpy(&new_policy, policy, sizeof(*policy)); \
607 ret = sscanf(buf, "%u", &new_policy.object); \
611 temp = new_policy.object; \
612 ret = cpufreq_set_policy(policy, &new_policy); \
614 policy->user_policy.object = temp; \
616 return ret ? ret : count; \
619 store_one(scaling_min_freq
, min
);
620 store_one(scaling_max_freq
, max
);
623 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
625 static ssize_t
show_cpuinfo_cur_freq(struct cpufreq_policy
*policy
,
628 unsigned int cur_freq
= __cpufreq_get(policy
);
630 return sprintf(buf
, "<unknown>");
631 return sprintf(buf
, "%u\n", cur_freq
);
635 * show_scaling_governor - show the current policy for the specified CPU
637 static ssize_t
show_scaling_governor(struct cpufreq_policy
*policy
, char *buf
)
639 if (policy
->policy
== CPUFREQ_POLICY_POWERSAVE
)
640 return sprintf(buf
, "powersave\n");
641 else if (policy
->policy
== CPUFREQ_POLICY_PERFORMANCE
)
642 return sprintf(buf
, "performance\n");
643 else if (policy
->governor
)
644 return scnprintf(buf
, CPUFREQ_NAME_PLEN
, "%s\n",
645 policy
->governor
->name
);
650 * store_scaling_governor - store policy for the specified CPU
652 static ssize_t
store_scaling_governor(struct cpufreq_policy
*policy
,
653 const char *buf
, size_t count
)
656 char str_governor
[16];
657 struct cpufreq_policy new_policy
;
659 memcpy(&new_policy
, policy
, sizeof(*policy
));
661 ret
= sscanf(buf
, "%15s", str_governor
);
665 if (cpufreq_parse_governor(str_governor
, &new_policy
.policy
,
666 &new_policy
.governor
))
669 ret
= cpufreq_set_policy(policy
, &new_policy
);
670 return ret
? ret
: count
;
674 * show_scaling_driver - show the cpufreq driver currently loaded
676 static ssize_t
show_scaling_driver(struct cpufreq_policy
*policy
, char *buf
)
678 return scnprintf(buf
, CPUFREQ_NAME_PLEN
, "%s\n", cpufreq_driver
->name
);
682 * show_scaling_available_governors - show the available CPUfreq governors
684 static ssize_t
show_scaling_available_governors(struct cpufreq_policy
*policy
,
688 struct cpufreq_governor
*t
;
691 i
+= sprintf(buf
, "performance powersave");
695 for_each_governor(t
) {
696 if (i
>= (ssize_t
) ((PAGE_SIZE
/ sizeof(char))
697 - (CPUFREQ_NAME_LEN
+ 2)))
699 i
+= scnprintf(&buf
[i
], CPUFREQ_NAME_PLEN
, "%s ", t
->name
);
702 i
+= sprintf(&buf
[i
], "\n");
706 ssize_t
cpufreq_show_cpus(const struct cpumask
*mask
, char *buf
)
711 for_each_cpu(cpu
, mask
) {
713 i
+= scnprintf(&buf
[i
], (PAGE_SIZE
- i
- 2), " ");
714 i
+= scnprintf(&buf
[i
], (PAGE_SIZE
- i
- 2), "%u", cpu
);
715 if (i
>= (PAGE_SIZE
- 5))
718 i
+= sprintf(&buf
[i
], "\n");
721 EXPORT_SYMBOL_GPL(cpufreq_show_cpus
);
724 * show_related_cpus - show the CPUs affected by each transition even if
725 * hw coordination is in use
727 static ssize_t
show_related_cpus(struct cpufreq_policy
*policy
, char *buf
)
729 return cpufreq_show_cpus(policy
->related_cpus
, buf
);
733 * show_affected_cpus - show the CPUs affected by each transition
735 static ssize_t
show_affected_cpus(struct cpufreq_policy
*policy
, char *buf
)
737 return cpufreq_show_cpus(policy
->cpus
, buf
);
740 static ssize_t
store_scaling_setspeed(struct cpufreq_policy
*policy
,
741 const char *buf
, size_t count
)
743 unsigned int freq
= 0;
746 if (!policy
->governor
|| !policy
->governor
->store_setspeed
)
749 ret
= sscanf(buf
, "%u", &freq
);
753 policy
->governor
->store_setspeed(policy
, freq
);
758 static ssize_t
show_scaling_setspeed(struct cpufreq_policy
*policy
, char *buf
)
760 if (!policy
->governor
|| !policy
->governor
->show_setspeed
)
761 return sprintf(buf
, "<unsupported>\n");
763 return policy
->governor
->show_setspeed(policy
, buf
);
767 * show_bios_limit - show the current cpufreq HW/BIOS limitation
769 static ssize_t
show_bios_limit(struct cpufreq_policy
*policy
, char *buf
)
773 if (cpufreq_driver
->bios_limit
) {
774 ret
= cpufreq_driver
->bios_limit(policy
->cpu
, &limit
);
776 return sprintf(buf
, "%u\n", limit
);
778 return sprintf(buf
, "%u\n", policy
->cpuinfo
.max_freq
);
781 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq
, 0400);
782 cpufreq_freq_attr_ro(cpuinfo_min_freq
);
783 cpufreq_freq_attr_ro(cpuinfo_max_freq
);
784 cpufreq_freq_attr_ro(cpuinfo_transition_latency
);
785 cpufreq_freq_attr_ro(scaling_available_governors
);
786 cpufreq_freq_attr_ro(scaling_driver
);
787 cpufreq_freq_attr_ro(scaling_cur_freq
);
788 cpufreq_freq_attr_ro(bios_limit
);
789 cpufreq_freq_attr_ro(related_cpus
);
790 cpufreq_freq_attr_ro(affected_cpus
);
791 cpufreq_freq_attr_rw(scaling_min_freq
);
792 cpufreq_freq_attr_rw(scaling_max_freq
);
793 cpufreq_freq_attr_rw(scaling_governor
);
794 cpufreq_freq_attr_rw(scaling_setspeed
);
796 static struct attribute
*default_attrs
[] = {
797 &cpuinfo_min_freq
.attr
,
798 &cpuinfo_max_freq
.attr
,
799 &cpuinfo_transition_latency
.attr
,
800 &scaling_min_freq
.attr
,
801 &scaling_max_freq
.attr
,
804 &scaling_governor
.attr
,
805 &scaling_driver
.attr
,
806 &scaling_available_governors
.attr
,
807 &scaling_setspeed
.attr
,
811 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
812 #define to_attr(a) container_of(a, struct freq_attr, attr)
814 static ssize_t
show(struct kobject
*kobj
, struct attribute
*attr
, char *buf
)
816 struct cpufreq_policy
*policy
= to_policy(kobj
);
817 struct freq_attr
*fattr
= to_attr(attr
);
820 down_read(&policy
->rwsem
);
823 ret
= fattr
->show(policy
, buf
);
827 up_read(&policy
->rwsem
);
832 static ssize_t
store(struct kobject
*kobj
, struct attribute
*attr
,
833 const char *buf
, size_t count
)
835 struct cpufreq_policy
*policy
= to_policy(kobj
);
836 struct freq_attr
*fattr
= to_attr(attr
);
837 ssize_t ret
= -EINVAL
;
841 if (!cpu_online(policy
->cpu
))
844 down_write(&policy
->rwsem
);
847 ret
= fattr
->store(policy
, buf
, count
);
851 up_write(&policy
->rwsem
);
858 static void cpufreq_sysfs_release(struct kobject
*kobj
)
860 struct cpufreq_policy
*policy
= to_policy(kobj
);
861 pr_debug("last reference is dropped\n");
862 complete(&policy
->kobj_unregister
);
865 static const struct sysfs_ops sysfs_ops
= {
870 static struct kobj_type ktype_cpufreq
= {
871 .sysfs_ops
= &sysfs_ops
,
872 .default_attrs
= default_attrs
,
873 .release
= cpufreq_sysfs_release
,
876 struct kobject
*cpufreq_global_kobject
;
877 EXPORT_SYMBOL(cpufreq_global_kobject
);
879 static int cpufreq_global_kobject_usage
;
881 int cpufreq_get_global_kobject(void)
883 if (!cpufreq_global_kobject_usage
++)
884 return kobject_add(cpufreq_global_kobject
,
885 &cpu_subsys
.dev_root
->kobj
, "%s", "cpufreq");
889 EXPORT_SYMBOL(cpufreq_get_global_kobject
);
891 void cpufreq_put_global_kobject(void)
893 if (!--cpufreq_global_kobject_usage
)
894 kobject_del(cpufreq_global_kobject
);
896 EXPORT_SYMBOL(cpufreq_put_global_kobject
);
898 int cpufreq_sysfs_create_file(const struct attribute
*attr
)
900 int ret
= cpufreq_get_global_kobject();
903 ret
= sysfs_create_file(cpufreq_global_kobject
, attr
);
905 cpufreq_put_global_kobject();
910 EXPORT_SYMBOL(cpufreq_sysfs_create_file
);
912 void cpufreq_sysfs_remove_file(const struct attribute
*attr
)
914 sysfs_remove_file(cpufreq_global_kobject
, attr
);
915 cpufreq_put_global_kobject();
917 EXPORT_SYMBOL(cpufreq_sysfs_remove_file
);
919 static int add_cpu_dev_symlink(struct cpufreq_policy
*policy
, int cpu
)
921 struct device
*cpu_dev
;
923 pr_debug("%s: Adding symlink for CPU: %u\n", __func__
, cpu
);
928 cpu_dev
= get_cpu_device(cpu
);
929 if (WARN_ON(!cpu_dev
))
932 return sysfs_create_link(&cpu_dev
->kobj
, &policy
->kobj
, "cpufreq");
935 static void remove_cpu_dev_symlink(struct cpufreq_policy
*policy
, int cpu
)
937 struct device
*cpu_dev
;
939 pr_debug("%s: Removing symlink for CPU: %u\n", __func__
, cpu
);
941 cpu_dev
= get_cpu_device(cpu
);
942 if (WARN_ON(!cpu_dev
))
945 sysfs_remove_link(&cpu_dev
->kobj
, "cpufreq");
948 /* Add/remove symlinks for all related CPUs */
949 static int cpufreq_add_dev_symlink(struct cpufreq_policy
*policy
)
954 /* Some related CPUs might not be present (physically hotplugged) */
955 for_each_cpu(j
, policy
->real_cpus
) {
956 if (j
== policy
->kobj_cpu
)
959 ret
= add_cpu_dev_symlink(policy
, j
);
967 static void cpufreq_remove_dev_symlink(struct cpufreq_policy
*policy
)
971 /* Some related CPUs might not be present (physically hotplugged) */
972 for_each_cpu(j
, policy
->real_cpus
) {
973 if (j
== policy
->kobj_cpu
)
976 remove_cpu_dev_symlink(policy
, j
);
980 static int cpufreq_add_dev_interface(struct cpufreq_policy
*policy
)
982 struct freq_attr
**drv_attr
;
985 /* set up files for this cpu device */
986 drv_attr
= cpufreq_driver
->attr
;
987 while (drv_attr
&& *drv_attr
) {
988 ret
= sysfs_create_file(&policy
->kobj
, &((*drv_attr
)->attr
));
993 if (cpufreq_driver
->get
) {
994 ret
= sysfs_create_file(&policy
->kobj
, &cpuinfo_cur_freq
.attr
);
999 ret
= sysfs_create_file(&policy
->kobj
, &scaling_cur_freq
.attr
);
1003 if (cpufreq_driver
->bios_limit
) {
1004 ret
= sysfs_create_file(&policy
->kobj
, &bios_limit
.attr
);
1009 return cpufreq_add_dev_symlink(policy
);
1012 static int cpufreq_init_policy(struct cpufreq_policy
*policy
)
1014 struct cpufreq_governor
*gov
= NULL
;
1015 struct cpufreq_policy new_policy
;
1017 memcpy(&new_policy
, policy
, sizeof(*policy
));
1019 /* Update governor of new_policy to the governor used before hotplug */
1020 gov
= find_governor(policy
->last_governor
);
1022 pr_debug("Restoring governor %s for cpu %d\n",
1023 policy
->governor
->name
, policy
->cpu
);
1025 gov
= CPUFREQ_DEFAULT_GOVERNOR
;
1027 new_policy
.governor
= gov
;
1029 /* Use the default policy if its valid. */
1030 if (cpufreq_driver
->setpolicy
)
1031 cpufreq_parse_governor(gov
->name
, &new_policy
.policy
, NULL
);
1033 /* set default policy */
1034 return cpufreq_set_policy(policy
, &new_policy
);
1037 static int cpufreq_add_policy_cpu(struct cpufreq_policy
*policy
, unsigned int cpu
)
1041 /* Has this CPU been taken care of already? */
1042 if (cpumask_test_cpu(cpu
, policy
->cpus
))
1046 ret
= __cpufreq_governor(policy
, CPUFREQ_GOV_STOP
);
1048 pr_err("%s: Failed to stop governor\n", __func__
);
1053 down_write(&policy
->rwsem
);
1054 cpumask_set_cpu(cpu
, policy
->cpus
);
1055 up_write(&policy
->rwsem
);
1058 ret
= __cpufreq_governor(policy
, CPUFREQ_GOV_START
);
1060 ret
= __cpufreq_governor(policy
, CPUFREQ_GOV_LIMITS
);
1063 pr_err("%s: Failed to start governor\n", __func__
);
1071 static struct cpufreq_policy
*cpufreq_policy_alloc(unsigned int cpu
)
1073 struct device
*dev
= get_cpu_device(cpu
);
1074 struct cpufreq_policy
*policy
;
1080 policy
= kzalloc(sizeof(*policy
), GFP_KERNEL
);
1084 if (!alloc_cpumask_var(&policy
->cpus
, GFP_KERNEL
))
1085 goto err_free_policy
;
1087 if (!zalloc_cpumask_var(&policy
->related_cpus
, GFP_KERNEL
))
1088 goto err_free_cpumask
;
1090 if (!zalloc_cpumask_var(&policy
->real_cpus
, GFP_KERNEL
))
1091 goto err_free_rcpumask
;
1093 ret
= kobject_init_and_add(&policy
->kobj
, &ktype_cpufreq
, &dev
->kobj
,
1096 pr_err("%s: failed to init policy->kobj: %d\n", __func__
, ret
);
1097 goto err_free_real_cpus
;
1100 INIT_LIST_HEAD(&policy
->policy_list
);
1101 init_rwsem(&policy
->rwsem
);
1102 spin_lock_init(&policy
->transition_lock
);
1103 init_waitqueue_head(&policy
->transition_wait
);
1104 init_completion(&policy
->kobj_unregister
);
1105 INIT_WORK(&policy
->update
, handle_update
);
1109 /* Set this once on allocation */
1110 policy
->kobj_cpu
= cpu
;
1115 free_cpumask_var(policy
->real_cpus
);
1117 free_cpumask_var(policy
->related_cpus
);
1119 free_cpumask_var(policy
->cpus
);
1126 static void cpufreq_policy_put_kobj(struct cpufreq_policy
*policy
, bool notify
)
1128 struct kobject
*kobj
;
1129 struct completion
*cmp
;
1132 blocking_notifier_call_chain(&cpufreq_policy_notifier_list
,
1133 CPUFREQ_REMOVE_POLICY
, policy
);
1135 down_write(&policy
->rwsem
);
1136 cpufreq_remove_dev_symlink(policy
);
1137 kobj
= &policy
->kobj
;
1138 cmp
= &policy
->kobj_unregister
;
1139 up_write(&policy
->rwsem
);
1143 * We need to make sure that the underlying kobj is
1144 * actually not referenced anymore by anybody before we
1145 * proceed with unloading.
1147 pr_debug("waiting for dropping of refcount\n");
1148 wait_for_completion(cmp
);
1149 pr_debug("wait complete\n");
1152 static void cpufreq_policy_free(struct cpufreq_policy
*policy
, bool notify
)
1154 unsigned long flags
;
1157 /* Remove policy from list */
1158 write_lock_irqsave(&cpufreq_driver_lock
, flags
);
1159 list_del(&policy
->policy_list
);
1161 for_each_cpu(cpu
, policy
->related_cpus
)
1162 per_cpu(cpufreq_cpu_data
, cpu
) = NULL
;
1163 write_unlock_irqrestore(&cpufreq_driver_lock
, flags
);
1165 cpufreq_policy_put_kobj(policy
, notify
);
1166 free_cpumask_var(policy
->real_cpus
);
1167 free_cpumask_var(policy
->related_cpus
);
1168 free_cpumask_var(policy
->cpus
);
1172 static int cpufreq_online(unsigned int cpu
)
1174 struct cpufreq_policy
*policy
;
1176 unsigned long flags
;
1180 pr_debug("%s: bringing CPU%u online\n", __func__
, cpu
);
1182 /* Check if this CPU already has a policy to manage it */
1183 policy
= per_cpu(cpufreq_cpu_data
, cpu
);
1185 WARN_ON(!cpumask_test_cpu(cpu
, policy
->related_cpus
));
1186 if (!policy_is_inactive(policy
))
1187 return cpufreq_add_policy_cpu(policy
, cpu
);
1189 /* This is the only online CPU for the policy. Start over. */
1191 down_write(&policy
->rwsem
);
1193 policy
->governor
= NULL
;
1194 up_write(&policy
->rwsem
);
1197 policy
= cpufreq_policy_alloc(cpu
);
1202 cpumask_copy(policy
->cpus
, cpumask_of(cpu
));
1204 /* call driver. From then on the cpufreq must be able
1205 * to accept all calls to ->verify and ->setpolicy for this CPU
1207 ret
= cpufreq_driver
->init(policy
);
1209 pr_debug("initialization failed\n");
1210 goto out_free_policy
;
1213 down_write(&policy
->rwsem
);
1216 /* related_cpus should at least include policy->cpus. */
1217 cpumask_copy(policy
->related_cpus
, policy
->cpus
);
1218 /* Remember CPUs present at the policy creation time. */
1219 cpumask_and(policy
->real_cpus
, policy
->cpus
, cpu_present_mask
);
1223 * affected cpus must always be the one, which are online. We aren't
1224 * managing offline cpus here.
1226 cpumask_and(policy
->cpus
, policy
->cpus
, cpu_online_mask
);
1229 policy
->user_policy
.min
= policy
->min
;
1230 policy
->user_policy
.max
= policy
->max
;
1232 write_lock_irqsave(&cpufreq_driver_lock
, flags
);
1233 for_each_cpu(j
, policy
->related_cpus
)
1234 per_cpu(cpufreq_cpu_data
, j
) = policy
;
1235 write_unlock_irqrestore(&cpufreq_driver_lock
, flags
);
1238 if (cpufreq_driver
->get
&& !cpufreq_driver
->setpolicy
) {
1239 policy
->cur
= cpufreq_driver
->get(policy
->cpu
);
1241 pr_err("%s: ->get() failed\n", __func__
);
1242 goto out_exit_policy
;
1247 * Sometimes boot loaders set CPU frequency to a value outside of
1248 * frequency table present with cpufreq core. In such cases CPU might be
1249 * unstable if it has to run on that frequency for long duration of time
1250 * and so its better to set it to a frequency which is specified in
1251 * freq-table. This also makes cpufreq stats inconsistent as
1252 * cpufreq-stats would fail to register because current frequency of CPU
1253 * isn't found in freq-table.
1255 * Because we don't want this change to effect boot process badly, we go
1256 * for the next freq which is >= policy->cur ('cur' must be set by now,
1257 * otherwise we will end up setting freq to lowest of the table as 'cur'
1258 * is initialized to zero).
1260 * We are passing target-freq as "policy->cur - 1" otherwise
1261 * __cpufreq_driver_target() would simply fail, as policy->cur will be
1262 * equal to target-freq.
1264 if ((cpufreq_driver
->flags
& CPUFREQ_NEED_INITIAL_FREQ_CHECK
)
1266 /* Are we running at unknown frequency ? */
1267 ret
= cpufreq_frequency_table_get_index(policy
, policy
->cur
);
1268 if (ret
== -EINVAL
) {
1269 /* Warn user and fix it */
1270 pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1271 __func__
, policy
->cpu
, policy
->cur
);
1272 ret
= __cpufreq_driver_target(policy
, policy
->cur
- 1,
1273 CPUFREQ_RELATION_L
);
1276 * Reaching here after boot in a few seconds may not
1277 * mean that system will remain stable at "unknown"
1278 * frequency for longer duration. Hence, a BUG_ON().
1281 pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1282 __func__
, policy
->cpu
, policy
->cur
);
1286 blocking_notifier_call_chain(&cpufreq_policy_notifier_list
,
1287 CPUFREQ_START
, policy
);
1290 ret
= cpufreq_add_dev_interface(policy
);
1292 goto out_exit_policy
;
1293 blocking_notifier_call_chain(&cpufreq_policy_notifier_list
,
1294 CPUFREQ_CREATE_POLICY
, policy
);
1296 write_lock_irqsave(&cpufreq_driver_lock
, flags
);
1297 list_add(&policy
->policy_list
, &cpufreq_policy_list
);
1298 write_unlock_irqrestore(&cpufreq_driver_lock
, flags
);
1301 ret
= cpufreq_init_policy(policy
);
1303 pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
1304 __func__
, cpu
, ret
);
1305 /* cpufreq_policy_free() will notify based on this */
1307 goto out_exit_policy
;
1310 up_write(&policy
->rwsem
);
1312 kobject_uevent(&policy
->kobj
, KOBJ_ADD
);
1314 /* Callback for handling stuff after policy is ready */
1315 if (cpufreq_driver
->ready
)
1316 cpufreq_driver
->ready(policy
);
1318 pr_debug("initialization complete\n");
1323 up_write(&policy
->rwsem
);
1325 if (cpufreq_driver
->exit
)
1326 cpufreq_driver
->exit(policy
);
1328 cpufreq_policy_free(policy
, !new_policy
);
1333 * cpufreq_add_dev - the cpufreq interface for a CPU device.
1335 * @sif: Subsystem interface structure pointer (not used)
1337 static int cpufreq_add_dev(struct device
*dev
, struct subsys_interface
*sif
)
1339 unsigned cpu
= dev
->id
;
1342 dev_dbg(dev
, "%s: adding CPU%u\n", __func__
, cpu
);
1344 if (cpu_online(cpu
)) {
1345 ret
= cpufreq_online(cpu
);
1348 * A hotplug notifier will follow and we will handle it as CPU
1349 * online then. For now, just create the sysfs link, unless
1350 * there is no policy or the link is already present.
1352 struct cpufreq_policy
*policy
= per_cpu(cpufreq_cpu_data
, cpu
);
1354 ret
= policy
&& !cpumask_test_and_set_cpu(cpu
, policy
->real_cpus
)
1355 ? add_cpu_dev_symlink(policy
, cpu
) : 0;
1361 static void cpufreq_offline_prepare(unsigned int cpu
)
1363 struct cpufreq_policy
*policy
;
1365 pr_debug("%s: unregistering CPU %u\n", __func__
, cpu
);
1367 policy
= cpufreq_cpu_get_raw(cpu
);
1369 pr_debug("%s: No cpu_data found\n", __func__
);
1374 int ret
= __cpufreq_governor(policy
, CPUFREQ_GOV_STOP
);
1376 pr_err("%s: Failed to stop governor\n", __func__
);
1379 down_write(&policy
->rwsem
);
1380 cpumask_clear_cpu(cpu
, policy
->cpus
);
1382 if (policy_is_inactive(policy
)) {
1384 strncpy(policy
->last_governor
, policy
->governor
->name
,
1386 } else if (cpu
== policy
->cpu
) {
1387 /* Nominate new CPU */
1388 policy
->cpu
= cpumask_any(policy
->cpus
);
1390 up_write(&policy
->rwsem
);
1392 /* Start governor again for active policy */
1393 if (!policy_is_inactive(policy
)) {
1395 int ret
= __cpufreq_governor(policy
, CPUFREQ_GOV_START
);
1397 ret
= __cpufreq_governor(policy
, CPUFREQ_GOV_LIMITS
);
1400 pr_err("%s: Failed to start governor\n", __func__
);
1402 } else if (cpufreq_driver
->stop_cpu
) {
1403 cpufreq_driver
->stop_cpu(policy
);
1407 static void cpufreq_offline_finish(unsigned int cpu
)
1409 struct cpufreq_policy
*policy
= per_cpu(cpufreq_cpu_data
, cpu
);
1412 pr_debug("%s: No cpu_data found\n", __func__
);
1416 /* Only proceed for inactive policies */
1417 if (!policy_is_inactive(policy
))
1420 /* If cpu is last user of policy, free policy */
1422 int ret
= __cpufreq_governor(policy
, CPUFREQ_GOV_POLICY_EXIT
);
1424 pr_err("%s: Failed to exit governor\n", __func__
);
1428 * Perform the ->exit() even during light-weight tear-down,
1429 * since this is a core component, and is essential for the
1430 * subsequent light-weight ->init() to succeed.
1432 if (cpufreq_driver
->exit
) {
1433 cpufreq_driver
->exit(policy
);
1434 policy
->freq_table
= NULL
;
1439 * cpufreq_remove_dev - remove a CPU device
1441 * Removes the cpufreq interface for a CPU device.
1443 static void cpufreq_remove_dev(struct device
*dev
, struct subsys_interface
*sif
)
1445 unsigned int cpu
= dev
->id
;
1446 struct cpufreq_policy
*policy
= per_cpu(cpufreq_cpu_data
, cpu
);
1451 if (cpu_online(cpu
)) {
1452 cpufreq_offline_prepare(cpu
);
1453 cpufreq_offline_finish(cpu
);
1456 cpumask_clear_cpu(cpu
, policy
->real_cpus
);
1458 if (cpumask_empty(policy
->real_cpus
)) {
1459 cpufreq_policy_free(policy
, true);
1463 if (cpu
!= policy
->kobj_cpu
) {
1464 remove_cpu_dev_symlink(policy
, cpu
);
1467 * The CPU owning the policy object is going away. Move it to
1468 * another suitable CPU.
1470 unsigned int new_cpu
= cpumask_first(policy
->real_cpus
);
1471 struct device
*new_dev
= get_cpu_device(new_cpu
);
1473 dev_dbg(dev
, "%s: Moving policy object to CPU%u\n", __func__
, new_cpu
);
1475 sysfs_remove_link(&new_dev
->kobj
, "cpufreq");
1476 policy
->kobj_cpu
= new_cpu
;
1477 WARN_ON(kobject_move(&policy
->kobj
, &new_dev
->kobj
));
1481 static void handle_update(struct work_struct
*work
)
1483 struct cpufreq_policy
*policy
=
1484 container_of(work
, struct cpufreq_policy
, update
);
1485 unsigned int cpu
= policy
->cpu
;
1486 pr_debug("handle_update for cpu %u called\n", cpu
);
1487 cpufreq_update_policy(cpu
);
1491 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1493 * @policy: policy managing CPUs
1494 * @new_freq: CPU frequency the CPU actually runs at
1496 * We adjust to current frequency first, and need to clean up later.
1497 * So either call to cpufreq_update_policy() or schedule handle_update()).
1499 static void cpufreq_out_of_sync(struct cpufreq_policy
*policy
,
1500 unsigned int new_freq
)
1502 struct cpufreq_freqs freqs
;
1504 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1505 policy
->cur
, new_freq
);
1507 freqs
.old
= policy
->cur
;
1508 freqs
.new = new_freq
;
1510 cpufreq_freq_transition_begin(policy
, &freqs
);
1511 cpufreq_freq_transition_end(policy
, &freqs
, 0);
1515 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1518 * This is the last known freq, without actually getting it from the driver.
1519 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1521 unsigned int cpufreq_quick_get(unsigned int cpu
)
1523 struct cpufreq_policy
*policy
;
1524 unsigned int ret_freq
= 0;
1526 if (cpufreq_driver
&& cpufreq_driver
->setpolicy
&& cpufreq_driver
->get
)
1527 return cpufreq_driver
->get(cpu
);
1529 policy
= cpufreq_cpu_get(cpu
);
1531 ret_freq
= policy
->cur
;
1532 cpufreq_cpu_put(policy
);
1537 EXPORT_SYMBOL(cpufreq_quick_get
);
1540 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1543 * Just return the max possible frequency for a given CPU.
1545 unsigned int cpufreq_quick_get_max(unsigned int cpu
)
1547 struct cpufreq_policy
*policy
= cpufreq_cpu_get(cpu
);
1548 unsigned int ret_freq
= 0;
1551 ret_freq
= policy
->max
;
1552 cpufreq_cpu_put(policy
);
1557 EXPORT_SYMBOL(cpufreq_quick_get_max
);
1559 static unsigned int __cpufreq_get(struct cpufreq_policy
*policy
)
1561 unsigned int ret_freq
= 0;
1563 if (!cpufreq_driver
->get
)
1566 ret_freq
= cpufreq_driver
->get(policy
->cpu
);
1568 /* Updating inactive policies is invalid, so avoid doing that. */
1569 if (unlikely(policy_is_inactive(policy
)))
1572 if (ret_freq
&& policy
->cur
&&
1573 !(cpufreq_driver
->flags
& CPUFREQ_CONST_LOOPS
)) {
1574 /* verify no discrepancy between actual and
1575 saved value exists */
1576 if (unlikely(ret_freq
!= policy
->cur
)) {
1577 cpufreq_out_of_sync(policy
, ret_freq
);
1578 schedule_work(&policy
->update
);
1586 * cpufreq_get - get the current CPU frequency (in kHz)
1589 * Get the CPU current (static) CPU frequency
1591 unsigned int cpufreq_get(unsigned int cpu
)
1593 struct cpufreq_policy
*policy
= cpufreq_cpu_get(cpu
);
1594 unsigned int ret_freq
= 0;
1597 down_read(&policy
->rwsem
);
1598 ret_freq
= __cpufreq_get(policy
);
1599 up_read(&policy
->rwsem
);
1601 cpufreq_cpu_put(policy
);
1606 EXPORT_SYMBOL(cpufreq_get
);
1608 static struct subsys_interface cpufreq_interface
= {
1610 .subsys
= &cpu_subsys
,
1611 .add_dev
= cpufreq_add_dev
,
1612 .remove_dev
= cpufreq_remove_dev
,
1616 * In case platform wants some specific frequency to be configured
1619 int cpufreq_generic_suspend(struct cpufreq_policy
*policy
)
1623 if (!policy
->suspend_freq
) {
1624 pr_debug("%s: suspend_freq not defined\n", __func__
);
1628 pr_debug("%s: Setting suspend-freq: %u\n", __func__
,
1629 policy
->suspend_freq
);
1631 ret
= __cpufreq_driver_target(policy
, policy
->suspend_freq
,
1632 CPUFREQ_RELATION_H
);
1634 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1635 __func__
, policy
->suspend_freq
, ret
);
1639 EXPORT_SYMBOL(cpufreq_generic_suspend
);
1642 * cpufreq_suspend() - Suspend CPUFreq governors
1644 * Called during system wide Suspend/Hibernate cycles for suspending governors
1645 * as some platforms can't change frequency after this point in suspend cycle.
1646 * Because some of the devices (like: i2c, regulators, etc) they use for
1647 * changing frequency are suspended quickly after this point.
1649 void cpufreq_suspend(void)
1651 struct cpufreq_policy
*policy
;
1653 if (!cpufreq_driver
)
1659 pr_debug("%s: Suspending Governors\n", __func__
);
1661 for_each_active_policy(policy
) {
1662 if (__cpufreq_governor(policy
, CPUFREQ_GOV_STOP
))
1663 pr_err("%s: Failed to stop governor for policy: %p\n",
1665 else if (cpufreq_driver
->suspend
1666 && cpufreq_driver
->suspend(policy
))
1667 pr_err("%s: Failed to suspend driver: %p\n", __func__
,
1672 cpufreq_suspended
= true;
1676 * cpufreq_resume() - Resume CPUFreq governors
1678 * Called during system wide Suspend/Hibernate cycle for resuming governors that
1679 * are suspended with cpufreq_suspend().
1681 void cpufreq_resume(void)
1683 struct cpufreq_policy
*policy
;
1685 if (!cpufreq_driver
)
1688 cpufreq_suspended
= false;
1693 pr_debug("%s: Resuming Governors\n", __func__
);
1695 for_each_active_policy(policy
) {
1696 if (cpufreq_driver
->resume
&& cpufreq_driver
->resume(policy
))
1697 pr_err("%s: Failed to resume driver: %p\n", __func__
,
1699 else if (__cpufreq_governor(policy
, CPUFREQ_GOV_START
)
1700 || __cpufreq_governor(policy
, CPUFREQ_GOV_LIMITS
))
1701 pr_err("%s: Failed to start governor for policy: %p\n",
1706 * schedule call cpufreq_update_policy() for first-online CPU, as that
1707 * wouldn't be hotplugged-out on suspend. It will verify that the
1708 * current freq is in sync with what we believe it to be.
1710 policy
= cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask
));
1711 if (WARN_ON(!policy
))
1714 schedule_work(&policy
->update
);
1718 * cpufreq_get_current_driver - return current driver's name
1720 * Return the name string of the currently loaded cpufreq driver
1723 const char *cpufreq_get_current_driver(void)
1726 return cpufreq_driver
->name
;
1730 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver
);
1733 * cpufreq_get_driver_data - return current driver data
1735 * Return the private data of the currently loaded cpufreq
1736 * driver, or NULL if no cpufreq driver is loaded.
1738 void *cpufreq_get_driver_data(void)
1741 return cpufreq_driver
->driver_data
;
1745 EXPORT_SYMBOL_GPL(cpufreq_get_driver_data
);
1747 /*********************************************************************
1748 * NOTIFIER LISTS INTERFACE *
1749 *********************************************************************/
1752 * cpufreq_register_notifier - register a driver with cpufreq
1753 * @nb: notifier function to register
1754 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1756 * Add a driver to one of two lists: either a list of drivers that
1757 * are notified about clock rate changes (once before and once after
1758 * the transition), or a list of drivers that are notified about
1759 * changes in cpufreq policy.
1761 * This function may sleep, and has the same return conditions as
1762 * blocking_notifier_chain_register.
1764 int cpufreq_register_notifier(struct notifier_block
*nb
, unsigned int list
)
1768 if (cpufreq_disabled())
1771 WARN_ON(!init_cpufreq_transition_notifier_list_called
);
1774 case CPUFREQ_TRANSITION_NOTIFIER
:
1775 ret
= srcu_notifier_chain_register(
1776 &cpufreq_transition_notifier_list
, nb
);
1778 case CPUFREQ_POLICY_NOTIFIER
:
1779 ret
= blocking_notifier_chain_register(
1780 &cpufreq_policy_notifier_list
, nb
);
1788 EXPORT_SYMBOL(cpufreq_register_notifier
);
1791 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1792 * @nb: notifier block to be unregistered
1793 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1795 * Remove a driver from the CPU frequency notifier list.
1797 * This function may sleep, and has the same return conditions as
1798 * blocking_notifier_chain_unregister.
1800 int cpufreq_unregister_notifier(struct notifier_block
*nb
, unsigned int list
)
1804 if (cpufreq_disabled())
1808 case CPUFREQ_TRANSITION_NOTIFIER
:
1809 ret
= srcu_notifier_chain_unregister(
1810 &cpufreq_transition_notifier_list
, nb
);
1812 case CPUFREQ_POLICY_NOTIFIER
:
1813 ret
= blocking_notifier_chain_unregister(
1814 &cpufreq_policy_notifier_list
, nb
);
1822 EXPORT_SYMBOL(cpufreq_unregister_notifier
);
1825 /*********************************************************************
1827 *********************************************************************/
1829 /* Must set freqs->new to intermediate frequency */
1830 static int __target_intermediate(struct cpufreq_policy
*policy
,
1831 struct cpufreq_freqs
*freqs
, int index
)
1835 freqs
->new = cpufreq_driver
->get_intermediate(policy
, index
);
1837 /* We don't need to switch to intermediate freq */
1841 pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
1842 __func__
, policy
->cpu
, freqs
->old
, freqs
->new);
1844 cpufreq_freq_transition_begin(policy
, freqs
);
1845 ret
= cpufreq_driver
->target_intermediate(policy
, index
);
1846 cpufreq_freq_transition_end(policy
, freqs
, ret
);
1849 pr_err("%s: Failed to change to intermediate frequency: %d\n",
1855 static int __target_index(struct cpufreq_policy
*policy
,
1856 struct cpufreq_frequency_table
*freq_table
, int index
)
1858 struct cpufreq_freqs freqs
= {.old
= policy
->cur
, .flags
= 0};
1859 unsigned int intermediate_freq
= 0;
1860 int retval
= -EINVAL
;
1863 notify
= !(cpufreq_driver
->flags
& CPUFREQ_ASYNC_NOTIFICATION
);
1865 /* Handle switching to intermediate frequency */
1866 if (cpufreq_driver
->get_intermediate
) {
1867 retval
= __target_intermediate(policy
, &freqs
, index
);
1871 intermediate_freq
= freqs
.new;
1872 /* Set old freq to intermediate */
1873 if (intermediate_freq
)
1874 freqs
.old
= freqs
.new;
1877 freqs
.new = freq_table
[index
].frequency
;
1878 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1879 __func__
, policy
->cpu
, freqs
.old
, freqs
.new);
1881 cpufreq_freq_transition_begin(policy
, &freqs
);
1884 retval
= cpufreq_driver
->target_index(policy
, index
);
1886 pr_err("%s: Failed to change cpu frequency: %d\n", __func__
,
1890 cpufreq_freq_transition_end(policy
, &freqs
, retval
);
1893 * Failed after setting to intermediate freq? Driver should have
1894 * reverted back to initial frequency and so should we. Check
1895 * here for intermediate_freq instead of get_intermediate, in
1896 * case we haven't switched to intermediate freq at all.
1898 if (unlikely(retval
&& intermediate_freq
)) {
1899 freqs
.old
= intermediate_freq
;
1900 freqs
.new = policy
->restore_freq
;
1901 cpufreq_freq_transition_begin(policy
, &freqs
);
1902 cpufreq_freq_transition_end(policy
, &freqs
, 0);
1909 int __cpufreq_driver_target(struct cpufreq_policy
*policy
,
1910 unsigned int target_freq
,
1911 unsigned int relation
)
1913 unsigned int old_target_freq
= target_freq
;
1914 int retval
= -EINVAL
;
1916 if (cpufreq_disabled())
1919 /* Make sure that target_freq is within supported range */
1920 if (target_freq
> policy
->max
)
1921 target_freq
= policy
->max
;
1922 if (target_freq
< policy
->min
)
1923 target_freq
= policy
->min
;
1925 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1926 policy
->cpu
, target_freq
, relation
, old_target_freq
);
1929 * This might look like a redundant call as we are checking it again
1930 * after finding index. But it is left intentionally for cases where
1931 * exactly same freq is called again and so we can save on few function
1934 if (target_freq
== policy
->cur
)
1937 /* Save last value to restore later on errors */
1938 policy
->restore_freq
= policy
->cur
;
1940 if (cpufreq_driver
->target
)
1941 retval
= cpufreq_driver
->target(policy
, target_freq
, relation
);
1942 else if (cpufreq_driver
->target_index
) {
1943 struct cpufreq_frequency_table
*freq_table
;
1946 freq_table
= cpufreq_frequency_get_table(policy
->cpu
);
1947 if (unlikely(!freq_table
)) {
1948 pr_err("%s: Unable to find freq_table\n", __func__
);
1952 retval
= cpufreq_frequency_table_target(policy
, freq_table
,
1953 target_freq
, relation
, &index
);
1954 if (unlikely(retval
)) {
1955 pr_err("%s: Unable to find matching freq\n", __func__
);
1959 if (freq_table
[index
].frequency
== policy
->cur
) {
1964 retval
= __target_index(policy
, freq_table
, index
);
1970 EXPORT_SYMBOL_GPL(__cpufreq_driver_target
);
1972 int cpufreq_driver_target(struct cpufreq_policy
*policy
,
1973 unsigned int target_freq
,
1974 unsigned int relation
)
1978 down_write(&policy
->rwsem
);
1980 ret
= __cpufreq_driver_target(policy
, target_freq
, relation
);
1982 up_write(&policy
->rwsem
);
1986 EXPORT_SYMBOL_GPL(cpufreq_driver_target
);
1988 static int __cpufreq_governor(struct cpufreq_policy
*policy
,
1993 /* Only must be defined when default governor is known to have latency
1994 restrictions, like e.g. conservative or ondemand.
1995 That this is the case is already ensured in Kconfig
1997 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1998 struct cpufreq_governor
*gov
= &cpufreq_gov_performance
;
2000 struct cpufreq_governor
*gov
= NULL
;
2003 /* Don't start any governor operations if we are entering suspend */
2004 if (cpufreq_suspended
)
2007 * Governor might not be initiated here if ACPI _PPC changed
2008 * notification happened, so check it.
2010 if (!policy
->governor
)
2013 if (policy
->governor
->max_transition_latency
&&
2014 policy
->cpuinfo
.transition_latency
>
2015 policy
->governor
->max_transition_latency
) {
2019 pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
2020 policy
->governor
->name
, gov
->name
);
2021 policy
->governor
= gov
;
2025 if (event
== CPUFREQ_GOV_POLICY_INIT
)
2026 if (!try_module_get(policy
->governor
->owner
))
2029 pr_debug("%s: for CPU %u, event %u\n", __func__
, policy
->cpu
, event
);
2031 mutex_lock(&cpufreq_governor_lock
);
2032 if ((policy
->governor_enabled
&& event
== CPUFREQ_GOV_START
)
2033 || (!policy
->governor_enabled
2034 && (event
== CPUFREQ_GOV_LIMITS
|| event
== CPUFREQ_GOV_STOP
))) {
2035 mutex_unlock(&cpufreq_governor_lock
);
2039 if (event
== CPUFREQ_GOV_STOP
)
2040 policy
->governor_enabled
= false;
2041 else if (event
== CPUFREQ_GOV_START
)
2042 policy
->governor_enabled
= true;
2044 mutex_unlock(&cpufreq_governor_lock
);
2046 ret
= policy
->governor
->governor(policy
, event
);
2049 if (event
== CPUFREQ_GOV_POLICY_INIT
)
2050 policy
->governor
->initialized
++;
2051 else if (event
== CPUFREQ_GOV_POLICY_EXIT
)
2052 policy
->governor
->initialized
--;
2054 /* Restore original values */
2055 mutex_lock(&cpufreq_governor_lock
);
2056 if (event
== CPUFREQ_GOV_STOP
)
2057 policy
->governor_enabled
= true;
2058 else if (event
== CPUFREQ_GOV_START
)
2059 policy
->governor_enabled
= false;
2060 mutex_unlock(&cpufreq_governor_lock
);
2063 if (((event
== CPUFREQ_GOV_POLICY_INIT
) && ret
) ||
2064 ((event
== CPUFREQ_GOV_POLICY_EXIT
) && !ret
))
2065 module_put(policy
->governor
->owner
);
2070 int cpufreq_register_governor(struct cpufreq_governor
*governor
)
2077 if (cpufreq_disabled())
2080 mutex_lock(&cpufreq_governor_mutex
);
2082 governor
->initialized
= 0;
2084 if (!find_governor(governor
->name
)) {
2086 list_add(&governor
->governor_list
, &cpufreq_governor_list
);
2089 mutex_unlock(&cpufreq_governor_mutex
);
2092 EXPORT_SYMBOL_GPL(cpufreq_register_governor
);
2094 void cpufreq_unregister_governor(struct cpufreq_governor
*governor
)
2096 struct cpufreq_policy
*policy
;
2097 unsigned long flags
;
2102 if (cpufreq_disabled())
2105 /* clear last_governor for all inactive policies */
2106 read_lock_irqsave(&cpufreq_driver_lock
, flags
);
2107 for_each_inactive_policy(policy
) {
2108 if (!strcmp(policy
->last_governor
, governor
->name
)) {
2109 policy
->governor
= NULL
;
2110 strcpy(policy
->last_governor
, "\0");
2113 read_unlock_irqrestore(&cpufreq_driver_lock
, flags
);
2115 mutex_lock(&cpufreq_governor_mutex
);
2116 list_del(&governor
->governor_list
);
2117 mutex_unlock(&cpufreq_governor_mutex
);
2120 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor
);
2123 /*********************************************************************
2124 * POLICY INTERFACE *
2125 *********************************************************************/
2128 * cpufreq_get_policy - get the current cpufreq_policy
2129 * @policy: struct cpufreq_policy into which the current cpufreq_policy
2132 * Reads the current cpufreq policy.
2134 int cpufreq_get_policy(struct cpufreq_policy
*policy
, unsigned int cpu
)
2136 struct cpufreq_policy
*cpu_policy
;
2140 cpu_policy
= cpufreq_cpu_get(cpu
);
2144 memcpy(policy
, cpu_policy
, sizeof(*policy
));
2146 cpufreq_cpu_put(cpu_policy
);
2149 EXPORT_SYMBOL(cpufreq_get_policy
);
/*
 * policy : current policy.
 * new_policy: policy to be set.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
			      struct cpufreq_policy *new_policy)
{
	struct cpufreq_governor *old_gov;
	int ret;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
		 new_policy->cpu, new_policy->min, new_policy->max);

	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

	/*
	 * This check works well when we store new min/max freq attributes,
	 * because new_policy is a copy of policy with one field updated.
	 */
	if (new_policy->min > new_policy->max)
		return -EINVAL;

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, new_policy);

	/*
	 * verify the cpu speed can be set within this limit, which might be
	 * different to the first one
	 */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, new_policy);

	policy->min = new_policy->min;
	policy->max = new_policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
		 policy->min, policy->max);

	if (cpufreq_driver->setpolicy) {
		policy->policy = new_policy->policy;
		pr_debug("setting range\n");
		return cpufreq_driver->setpolicy(new_policy);
	}

	if (new_policy->governor == policy->governor)
		goto out;

	pr_debug("governor switch\n");

	/* save old, working values */
	old_gov = policy->governor;
	/* end old governor */
	if (old_gov) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			/* This can happen due to race with other operations */
			pr_debug("%s: Failed to Stop Governor: %s (%d)\n",
				 __func__, old_gov->name, ret);
			return ret;
		}

		up_write(&policy->rwsem);
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
		down_write(&policy->rwsem);

		if (ret) {
			pr_err("%s: Failed to Exit Governor: %s (%d)\n",
			       __func__, old_gov->name, ret);
			return ret;
		}
	}

	/* start new governor */
	policy->governor = new_policy->governor;
	ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
	if (!ret) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
		if (!ret)
			goto out;

		up_write(&policy->rwsem);
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
		down_write(&policy->rwsem);
	}

	/* new governor failed, so re-start old one */
	pr_debug("starting governor %s failed\n", policy->governor->name);
	if (old_gov) {
		policy->governor = old_gov;
		if (__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
			policy->governor = NULL;
		else
			__cpufreq_governor(policy, CPUFREQ_GOV_START);
	}

	return ret;

out:
	pr_debug("governor: change or update limits\n");
	return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
}
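
/*
 * Illustrative sketch only: the CPUFREQ_ADJUST notification issued above lets
 * other kernel code (e.g. a thermal driver) clamp the limits before they are
 * applied. cpufreq_verify_within_limits() is the real helper for that; the
 * notifier itself and the 1200000 kHz cap are hypothetical.
 */
static int example_thermal_adjust(struct notifier_block *nb,
				  unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;

	if (event != CPUFREQ_ADJUST)
		return NOTIFY_DONE;

	/* never allow more than a (made up) 1.2 GHz while throttled */
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, 1200000);

	return NOTIFY_OK;
}

static struct notifier_block example_thermal_nb = {
	.notifier_call = example_thermal_adjust,
};
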
/**
 *	cpufreq_update_policy - re-evaluate an existing cpufreq policy
 *	@cpu: CPU which shall be re-evaluated
 *
 *	Useful for policy notifiers which have different necessities
 *	at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cpufreq_policy new_policy;
	int ret;

	if (!policy)
		return -ENODEV;

	down_write(&policy->rwsem);

	pr_debug("updating policy for CPU %u\n", cpu);
	memcpy(&new_policy, policy, sizeof(*policy));
	new_policy.min = policy->user_policy.min;
	new_policy.max = policy->user_policy.max;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
		new_policy.cur = cpufreq_driver->get(cpu);
		if (WARN_ON(!new_policy.cur)) {
			ret = -EIO;
			goto unlock;
		}

		if (!policy->cur) {
			pr_debug("Driver did not initialize current freq\n");
			policy->cur = new_policy.cur;
		} else {
			if (policy->cur != new_policy.cur && has_target())
				cpufreq_out_of_sync(policy, new_policy.cur);
		}
	}

	ret = cpufreq_set_policy(policy, &new_policy);

unlock:
	up_write(&policy->rwsem);

	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
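
/*
 * Illustrative sketch only: a component like the notifier sketched above only
 * takes effect when each policy is re-evaluated, so after registering for
 * CPUFREQ_POLICY_NOTIFIER events it would poke every online CPU with
 * cpufreq_update_policy(). All example_* names are hypothetical.
 */
static int __maybe_unused example_thermal_register(void)
{
	unsigned int cpu;
	int ret;

	ret = cpufreq_register_notifier(&example_thermal_nb,
					CPUFREQ_POLICY_NOTIFIER);
	if (ret)
		return ret;

	/* re-evaluate every policy so the new cap is picked up */
	get_online_cpus();
	for_each_online_cpu(cpu)
		cpufreq_update_policy(cpu);
	put_online_cpus();

	return 0;
}
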
static int cpufreq_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		cpufreq_online(cpu);
		break;

	case CPU_DOWN_PREPARE:
		cpufreq_offline_prepare(cpu);
		break;

	case CPU_POST_DEAD:
		cpufreq_offline_finish(cpu);
		break;

	case CPU_DOWN_FAILED:
		cpufreq_online(cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
/*********************************************************************
 *               BOOST                                               *
 *********************************************************************/
static int cpufreq_boost_set_sw(int state)
{
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_policy *policy;
	int ret = -EINVAL;

	for_each_active_policy(policy) {
		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (freq_table) {
			ret = cpufreq_frequency_table_cpuinfo(policy,
							freq_table);
			if (ret) {
				pr_err("%s: Policy frequency update failed\n",
				       __func__);
				break;
			}
			policy->user_policy.max = policy->max;
			__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
		}
	}

	return ret;
}
int cpufreq_boost_trigger_state(int state)
{
	unsigned long flags;
	int ret = 0;

	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_driver->set_boost(state);
	if (ret) {
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		cpufreq_driver->boost_enabled = !state;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		pr_err("%s: Cannot %s BOOST\n",
		       __func__, state ? "enable" : "disable");
	}

	return ret;
}
int cpufreq_boost_supported(void)
{
	if (likely(cpufreq_driver))
		return cpufreq_driver->boost_supported;

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
static int create_boost_sysfs_file(void)
{
	int ret;

	if (!cpufreq_boost_supported())
		return 0;

	/*
	 * Check if driver provides function to enable boost -
	 * if not, use cpufreq_boost_set_sw as default
	 */
	if (!cpufreq_driver->set_boost)
		cpufreq_driver->set_boost = cpufreq_boost_set_sw;

	ret = cpufreq_sysfs_create_file(&boost.attr);
	if (ret)
		pr_err("%s: cannot register global BOOST sysfs file\n",
		       __func__);

	return ret;
}

static void remove_boost_sysfs_file(void)
{
	if (cpufreq_boost_supported())
		cpufreq_sysfs_remove_file(&boost.attr);
}
int cpufreq_enable_boost_support(void)
{
	if (!cpufreq_driver)
		return -EINVAL;

	if (cpufreq_boost_supported())
		return 0;

	cpufreq_driver->boost_supported = true;

	/* This will get removed on driver unregister */
	return create_boost_sysfs_file();
}
EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);

int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
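
/*
 * Illustrative sketch only: a driver that discovers boost capability at run
 * time (rather than setting ->boost_supported statically) can opt in from its
 * ->init() callback. Both example_* names below are hypothetical; the probe
 * is a placeholder standing in for a real firmware query.
 */
static bool example_firmware_has_boost(void)
{
	return false;	/* placeholder: a real driver would query firmware */
}

static int __maybe_unused example_driver_init(struct cpufreq_policy *policy)
{
	/* ... normal per-policy setup elided ... */

	if (example_firmware_has_boost()) {
		int ret = cpufreq_enable_boost_support();

		if (ret)
			return ret;
	}

	return 0;
}
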
/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
		    driver_data->target) ||
	     (driver_data->setpolicy && (driver_data->target_index ||
		    driver_data->target)) ||
	     (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	/* Protect against concurrent CPU online/offline. */
	get_online_cpus();

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		ret = -EEXIST;
		goto out;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	ret = create_boost_sysfs_file();
	if (ret)
		goto err_null_driver;

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
	    list_empty(&cpufreq_policy_list)) {
		/* if all ->init() calls failed, unregister */
		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
			 driver_data->name);
		goto err_if_unreg;
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

out:
	put_online_cpus();
	return ret;

err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	remove_boost_sysfs_file();
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	goto out;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
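
/*
 * Illustrative sketch only: a minimal ->target_index style driver that would
 * pass the sanity checks above (->init and ->verify present, ->setpolicy not
 * mixed with ->target_index). The example_* callbacks are placeholder stubs;
 * cpufreq_generic_frequency_table_verify() and cpufreq_generic_get() are the
 * real generic helpers.
 */
static int example_cpu_init(struct cpufreq_policy *policy)
{
	return -ENODEV;	/* placeholder: real drivers set up the freq table here */
}

static int example_target_index(struct cpufreq_policy *policy,
				unsigned int index)
{
	return -ENODEV;	/* placeholder: real drivers program the hardware here */
}

static struct cpufreq_driver example_cpufreq_driver = {
	.name		= "example",
	.flags		= CPUFREQ_STICKY,
	.init		= example_cpu_init,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= example_target_index,
	.get		= cpufreq_generic_get,
};

static int __init example_cpufreq_probe(void)
{
	return cpufreq_register_driver(&example_cpufreq_driver);
}
module_init(example_cpufreq_probe);
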
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	/* Protect against concurrent cpu hotplug */
	get_online_cpus();
	subsys_interface_unregister(&cpufreq_interface);
	remove_boost_sysfs_file();
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	put_online_cpus();

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
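
/*
 * Illustrative sketch only, continuing the hypothetical driver above: the
 * matching module exit path hands the same struct back to
 * cpufreq_unregister_driver().
 */
static void __exit example_cpufreq_remove(void)
{
	cpufreq_unregister_driver(&example_cpufreq_driver);
}
module_exit(example_cpufreq_remove);
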
/*
 * Stop cpufreq at shutdown to make sure it isn't holding any locks
 * or mutexes when secondary CPUs are halted.
 */
static struct syscore_ops cpufreq_syscore_ops = {
	.shutdown = cpufreq_suspend,
};

static int __init cpufreq_core_init(void)
{
	if (cpufreq_disabled())
		return -ENODEV;

	cpufreq_global_kobject = kobject_create();
	BUG_ON(!cpufreq_global_kobject);

	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);