/*
 * linux/drivers/cpufreq/cpufreq.c
 *
 * Copyright (C) 2001 Russell King
 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *        Added handling for CPU hotplug
 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *        Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>
static LIST_HEAD(cpufreq_policy_list);

static inline bool policy_is_inactive(struct cpufreq_policy *policy)
{
        return cpumask_empty(policy->cpus);
}

static bool suitable_policy(struct cpufreq_policy *policy, bool active)
{
        return active == !policy_is_inactive(policy);
}

/* Finds Next Active/Inactive policy */
static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
                                          bool active)
{
        do {
                policy = list_next_entry(policy, policy_list);

                /* No more policies in the list */
                if (&policy->policy_list == &cpufreq_policy_list)
                        return NULL;
        } while (!suitable_policy(policy, active));

        return policy;
}

static struct cpufreq_policy *first_policy(bool active)
{
        struct cpufreq_policy *policy;

        /* No policies in the list */
        if (list_empty(&cpufreq_policy_list))
                return NULL;

        policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
                                  policy_list);

        if (!suitable_policy(policy, active))
                policy = next_policy(policy, active);

        return policy;
}

/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)        \
        for (__policy = first_policy(__active);              \
             __policy;                                       \
             __policy = next_policy(__policy, __active))

#define for_each_active_policy(__policy)                     \
        for_each_suitable_policy(__policy, true)
#define for_each_inactive_policy(__policy)                   \
        for_each_suitable_policy(__policy, false)

#define for_each_policy(__policy)                            \
        list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)                        \
        list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);
DEFINE_MUTEX(cpufreq_governor_lock);

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

static inline bool has_target(void)
{
        return cpufreq_driver->target_index || cpufreq_driver->target;
}

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
                              unsigned int event);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static void handle_update(struct work_struct *work);
/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
        srcu_init_notifier_head(&cpufreq_transition_notifier_list);
        init_cpufreq_transition_notifier_list_called = true;
        return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
        return off;
}
void disable_cpufreq(void)
{
        off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
        return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
        if (have_governor_per_policy())
                return &policy->kobj;
        else
                return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
{
        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

        return policy && !policy_is_inactive(policy) ?
                policy->freq_table : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
        u64 idle_time;
        u64 cur_wall_time;
        u64 busy_time;

        cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

        busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

        idle_time = cur_wall_time - busy_time;
        if (wall)
                *wall = cputime_to_usecs(cur_wall_time);

        return cputime_to_usecs(idle_time);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
        u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

        if (idle_time == -1ULL)
                return get_cpu_idle_time_jiffy(cpu, wall);
        else if (!io_busy)
                idle_time += get_cpu_iowait_time_us(cpu, wall);

        return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - validate & show the freq table passed in
 * - set the policy's transition latency
 * - fill policy->cpus with all possible CPUs
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
                struct cpufreq_frequency_table *table,
                unsigned int transition_latency)
{
        int ret;

        ret = cpufreq_table_validate_and_show(policy, table);
        if (ret) {
                pr_err("%s: invalid frequency table: %d\n", __func__, ret);
                return ret;
        }

        policy->cpuinfo.transition_latency = transition_latency;

        /*
         * The driver only supports the SMP configuration where all processors
         * share the clock and voltage.
         */
        cpumask_setall(policy->cpus);

        return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

        return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);

unsigned int cpufreq_generic_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

        if (!policy || IS_ERR(policy->clk)) {
                pr_err("%s: No %s associated to cpu: %d\n",
                       __func__, policy ? "clk" : "policy", cpu);
                return 0;
        }

        return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);

/**
 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
 *
 * @cpu: cpu to find policy for.
 *
 * This returns policy for 'cpu', returns NULL if it doesn't exist.
 * It also increments the kobject reference count to mark it busy, and so
 * requires a corresponding call to cpufreq_cpu_put() to decrement it back.
 * If that call to cpufreq_cpu_put() isn't made, the policy won't be freed,
 * as freeing depends on the kobj count.
 *
 * Return: A valid policy on success, otherwise NULL on failure.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = NULL;
        unsigned long flags;

        if (WARN_ON(cpu >= nr_cpu_ids))
                return NULL;

        /* get the cpufreq driver */
        read_lock_irqsave(&cpufreq_driver_lock, flags);

        if (cpufreq_driver) {
                /* get the CPU */
                policy = cpufreq_cpu_get_raw(cpu);
                if (policy)
                        kobject_get(&policy->kobj);
        }

        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

/**
 * cpufreq_cpu_put: Decrements the usage count of a policy
 *
 * @policy: policy earlier returned by cpufreq_cpu_get().
 *
 * This decrements the kobject reference count incremented earlier by calling
 * cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
        kobject_put(&policy->kobj);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
        static unsigned long l_p_j_ref;
        static unsigned int l_p_j_ref_freq;

        if (ci->flags & CPUFREQ_CONST_LOOPS)
                return;

        if (!l_p_j_ref_freq) {
                l_p_j_ref = loops_per_jiffy;
                l_p_j_ref_freq = ci->old;
                pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
                         l_p_j_ref, l_p_j_ref_freq);
        }
        if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
                loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
                                                                ci->new);
                pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
                         loops_per_jiffy, ci->new);
        }
#endif
}

static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, unsigned int state)
{
        BUG_ON(irqs_disabled());

        if (cpufreq_disabled())
                return;

        freqs->flags = cpufreq_driver->flags;
        pr_debug("notification %u of frequency transition to %u kHz\n",
                 state, freqs->new);

        switch (state) {

        case CPUFREQ_PRECHANGE:
                /* detect if the driver reported a value as "old frequency"
                 * which is not equal to what the cpufreq core thinks is
                 * "old frequency".
                 */
                if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
                        if ((policy) && (policy->cpu == freqs->cpu) &&
                            (policy->cur) && (policy->cur != freqs->old)) {
                                pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
                                         freqs->old, policy->cur);
                                freqs->old = policy->cur;
                        }
                }
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_PRECHANGE, freqs);
                adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
                break;

        case CPUFREQ_POSTCHANGE:
                adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
                pr_debug("FREQ: %lu - CPU: %lu\n",
                         (unsigned long)freqs->new, (unsigned long)freqs->cpu);
                trace_cpu_frequency(freqs->new, freqs->cpu);
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_POSTCHANGE, freqs);
                if (likely(policy) && likely(policy->cpu == freqs->cpu))
                        policy->cur = freqs->new;
                break;
        }
}
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, unsigned int state)
{
        for_each_cpu(freqs->cpu, policy->cpus)
                __cpufreq_notify_transition(policy, freqs, state);
}

/* Do post notifications when there are chances that transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, int transition_failed)
{
        cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
        if (!transition_failed)
                return;

        swap(freqs->old, freqs->new);
        cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
        cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}

void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs)
{
        /*
         * Catch double invocations of _begin() which lead to self-deadlock.
         * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
         * doesn't invoke _begin() on their behalf, and hence the chances of
         * double invocations are very low. Moreover, there are scenarios
         * where these checks can emit false-positive warnings in these
         * drivers; so we avoid that by skipping them altogether.
         */
        WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
                                && current == policy->transition_task);

wait:
        wait_event(policy->transition_wait, !policy->transition_ongoing);

        spin_lock(&policy->transition_lock);

        if (unlikely(policy->transition_ongoing)) {
                spin_unlock(&policy->transition_lock);
                goto wait;
        }

        policy->transition_ongoing = true;
        policy->transition_task = current;

        spin_unlock(&policy->transition_lock);

        cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);

void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, int transition_failed)
{
        if (unlikely(WARN_ON(!policy->transition_ongoing)))
                return;

        cpufreq_notify_post_transition(policy, freqs, transition_failed);

        policy->transition_ongoing = false;
        policy->transition_task = NULL;

        wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
                                  const char *buf, size_t count)
{
        int ret, enable;

        ret = sscanf(buf, "%d", &enable);
        if (ret != 1 || enable < 0 || enable > 1)
                return -EINVAL;

        if (cpufreq_boost_trigger_state(enable)) {
                pr_err("%s: Cannot %s BOOST!\n",
                       __func__, enable ? "enable" : "disable");
                return -EINVAL;
        }

        pr_debug("%s: cpufreq BOOST %s\n",
                 __func__, enable ? "enabled" : "disabled");

        return count;
}
define_one_global_rw(boost);

static struct cpufreq_governor *find_governor(const char *str_governor)
{
        struct cpufreq_governor *t;

        for_each_governor(t)
                if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
                        return t;

        return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
                                struct cpufreq_governor **governor)
{
        int err = -EINVAL;

        if (cpufreq_driver->setpolicy) {
                if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
                        *policy = CPUFREQ_POLICY_PERFORMANCE;
                        err = 0;
                } else if (!strncasecmp(str_governor, "powersave",
                                                CPUFREQ_NAME_LEN)) {
                        *policy = CPUFREQ_POLICY_POWERSAVE;
                        err = 0;
                }
        } else {
                struct cpufreq_governor *t;

                mutex_lock(&cpufreq_governor_mutex);

                t = find_governor(str_governor);

                if (t == NULL) {
                        int ret;

                        mutex_unlock(&cpufreq_governor_mutex);
                        ret = request_module("cpufreq_%s", str_governor);
                        mutex_lock(&cpufreq_governor_mutex);

                        if (ret == 0)
                                t = find_governor(str_governor);
                }

                if (t != NULL) {
                        *governor = t;
                        err = 0;
                }

                mutex_unlock(&cpufreq_governor_mutex);
        }
        return err;
}
/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)                        \
static ssize_t show_##file_name                            \
(struct cpufreq_policy *policy, char *buf)                 \
{                                                          \
        return sprintf(buf, "%u\n", policy->object);       \
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);

static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
        ssize_t ret;

        if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
                ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
        else
                ret = sprintf(buf, "%u\n", policy->cur);
        return ret;
}

static int cpufreq_set_policy(struct cpufreq_policy *policy,
                                struct cpufreq_policy *new_policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)                                    \
static ssize_t store_##file_name                                        \
(struct cpufreq_policy *policy, const char *buf, size_t count)          \
{                                                                       \
        int ret, temp;                                                  \
        struct cpufreq_policy new_policy;                               \
                                                                        \
        memcpy(&new_policy, policy, sizeof(*policy));                   \
                                                                        \
        ret = sscanf(buf, "%u", &new_policy.object);                    \
        if (ret != 1)                                                   \
                return -EINVAL;                                         \
                                                                        \
        temp = new_policy.object;                                       \
        ret = cpufreq_set_policy(policy, &new_policy);                  \
        if (!ret)                                                       \
                policy->user_policy.object = temp;                      \
                                                                        \
        return ret ? ret : count;                                       \
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
                                        char *buf)
{
        unsigned int cur_freq = __cpufreq_get(policy);
        if (!cur_freq)
                return sprintf(buf, "<unknown>");
        return sprintf(buf, "%u\n", cur_freq);
}
/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
        if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
                return sprintf(buf, "powersave\n");
        else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
                return sprintf(buf, "performance\n");
        else if (policy->governor)
                return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
                                policy->governor->name);
        return -EINVAL;
}

/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
                                        const char *buf, size_t count)
{
        int ret;
        char str_governor[16];
        struct cpufreq_policy new_policy;

        memcpy(&new_policy, policy, sizeof(*policy));

        ret = sscanf(buf, "%15s", str_governor);
        if (ret != 1)
                return -EINVAL;

        if (cpufreq_parse_governor(str_governor, &new_policy.policy,
                                                &new_policy.governor))
                return -EINVAL;

        ret = cpufreq_set_policy(policy, &new_policy);
        return ret ? ret : count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
        return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}
/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
                                                char *buf)
{
        ssize_t i = 0;
        struct cpufreq_governor *t;

        if (!has_target()) {
                i += sprintf(buf, "performance powersave");
                goto out;
        }

        for_each_governor(t) {
                if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
                    - (CPUFREQ_NAME_LEN + 2)))
                        goto out;
                i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
        }
out:
        i += sprintf(&buf[i], "\n");
        return i;
}

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
        ssize_t i = 0;
        unsigned int cpu;

        for_each_cpu(cpu, mask) {
                if (i)
                        i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
                i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
                if (i >= (PAGE_SIZE - 5))
                        break;
        }
        i += sprintf(&buf[i], "\n");
        return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
        return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
        return cpufreq_show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
                                        const char *buf, size_t count)
{
        unsigned int freq = 0;
        unsigned int ret;

        if (!policy->governor || !policy->governor->store_setspeed)
                return -EINVAL;

        ret = sscanf(buf, "%u", &freq);
        if (ret != 1)
                return -EINVAL;

        policy->governor->store_setspeed(policy, freq);

        return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
        if (!policy->governor || !policy->governor->show_setspeed)
                return sprintf(buf, "<unsupported>\n");

        return policy->governor->show_setspeed(policy, buf);
}

/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
        unsigned int limit;
        int ret;

        if (cpufreq_driver->bios_limit) {
                ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
                if (!ret)
                        return sprintf(buf, "%u\n", limit);
        }
        return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
        &cpuinfo_min_freq.attr,
        &cpuinfo_max_freq.attr,
        &cpuinfo_transition_latency.attr,
        &scaling_min_freq.attr,
        &scaling_max_freq.attr,
        &affected_cpus.attr,
        &related_cpus.attr,
        &scaling_governor.attr,
        &scaling_driver.attr,
        &scaling_available_governors.attr,
        &scaling_setspeed.attr,
        NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret;

        down_read(&policy->rwsem);

        if (fattr->show)
                ret = fattr->show(policy, buf);
        else
                ret = -EIO;

        up_read(&policy->rwsem);

        return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret = -EINVAL;

        get_online_cpus();

        if (!cpu_online(policy->cpu))
                goto unlock;

        down_write(&policy->rwsem);

        if (fattr->store)
                ret = fattr->store(policy, buf, count);
        else
                ret = -EIO;

        up_write(&policy->rwsem);
unlock:
        put_online_cpus();

        return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        pr_debug("last reference is dropped\n");
        complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
        .show   = show,
        .store  = store,
};

static struct kobj_type ktype_cpufreq = {
        .sysfs_ops      = &sysfs_ops,
        .default_attrs  = default_attrs,
        .release        = cpufreq_sysfs_release,
};
static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
{
        struct device *cpu_dev;

        pr_debug("%s: Adding symlink for CPU: %u\n", __func__, cpu);

        if (!policy)
                return 0;

        cpu_dev = get_cpu_device(cpu);
        if (WARN_ON(!cpu_dev))
                return 0;

        return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq");
}

static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
{
        struct device *cpu_dev;

        pr_debug("%s: Removing symlink for CPU: %u\n", __func__, cpu);

        cpu_dev = get_cpu_device(cpu);
        if (WARN_ON(!cpu_dev))
                return;

        sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
}

/* Add/remove symlinks for all related CPUs */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
        unsigned int j;
        int ret = 0;

        /* Some related CPUs might not be present (physically hotplugged) */
        for_each_cpu(j, policy->real_cpus) {
                ret = add_cpu_dev_symlink(policy, j);
                if (ret)
                        break;
        }

        return ret;
}

static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
{
        unsigned int j;

        /* Some related CPUs might not be present (physically hotplugged) */
        for_each_cpu(j, policy->real_cpus)
                remove_cpu_dev_symlink(policy, j);
}
static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
{
        struct freq_attr **drv_attr;
        int ret = 0;

        /* set up files for this cpu device */
        drv_attr = cpufreq_driver->attr;
        while (drv_attr && *drv_attr) {
                ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
                if (ret)
                        return ret;
                drv_attr++;
        }
        if (cpufreq_driver->get) {
                ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
                if (ret)
                        return ret;
        }

        ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
        if (ret)
                return ret;

        if (cpufreq_driver->bios_limit) {
                ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
                if (ret)
                        return ret;
        }

        return cpufreq_add_dev_symlink(policy);
}
static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
        struct cpufreq_governor *gov = NULL;
        struct cpufreq_policy new_policy;

        memcpy(&new_policy, policy, sizeof(*policy));

        /* Update governor of new_policy to the governor used before hotplug */
        gov = find_governor(policy->last_governor);
        if (gov)
                pr_debug("Restoring governor %s for cpu %d\n",
                                policy->governor->name, policy->cpu);
        else
                gov = CPUFREQ_DEFAULT_GOVERNOR;

        new_policy.governor = gov;

        /* Use the default policy if it's valid. */
        if (cpufreq_driver->setpolicy)
                cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);

        /* set default policy */
        return cpufreq_set_policy(policy, &new_policy);
}

static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
        int ret = 0;

        /* Has this CPU been taken care of already? */
        if (cpumask_test_cpu(cpu, policy->cpus))
                return 0;

        if (has_target()) {
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
                if (ret) {
                        pr_err("%s: Failed to stop governor\n", __func__);
                        return ret;
                }
        }

        down_write(&policy->rwsem);
        cpumask_set_cpu(cpu, policy->cpus);
        up_write(&policy->rwsem);

        if (has_target()) {
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
                if (!ret)
                        ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

                if (ret) {
                        pr_err("%s: Failed to start governor\n", __func__);
                        return ret;
                }
        }

        return 0;
}
static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
{
        struct device *dev = get_cpu_device(cpu);
        struct cpufreq_policy *policy;

        if (WARN_ON(!dev))
                return NULL;

        policy = kzalloc(sizeof(*policy), GFP_KERNEL);
        if (!policy)
                return NULL;

        if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
                goto err_free_policy;

        if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
                goto err_free_cpumask;

        if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
                goto err_free_rcpumask;

        kobject_init(&policy->kobj, &ktype_cpufreq);
        INIT_LIST_HEAD(&policy->policy_list);
        init_rwsem(&policy->rwsem);
        spin_lock_init(&policy->transition_lock);
        init_waitqueue_head(&policy->transition_wait);
        init_completion(&policy->kobj_unregister);
        INIT_WORK(&policy->update, handle_update);

        policy->cpu = cpu;
        return policy;

err_free_rcpumask:
        free_cpumask_var(policy->related_cpus);
err_free_cpumask:
        free_cpumask_var(policy->cpus);
err_free_policy:
        kfree(policy);

        return NULL;
}

static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
{
        struct kobject *kobj;
        struct completion *cmp;

        if (notify)
                blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                             CPUFREQ_REMOVE_POLICY, policy);

        down_write(&policy->rwsem);
        cpufreq_remove_dev_symlink(policy);
        kobj = &policy->kobj;
        cmp = &policy->kobj_unregister;
        up_write(&policy->rwsem);
        kobject_put(kobj);

        /*
         * We need to make sure that the underlying kobj is
         * actually not referenced anymore by anybody before we
         * proceed with unloading.
         */
        pr_debug("waiting for dropping of refcount\n");
        wait_for_completion(cmp);
        pr_debug("wait complete\n");
}

static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
{
        unsigned long flags;
        int cpu;

        /* Remove policy from list */
        write_lock_irqsave(&cpufreq_driver_lock, flags);
        list_del(&policy->policy_list);

        for_each_cpu(cpu, policy->related_cpus)
                per_cpu(cpufreq_cpu_data, cpu) = NULL;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        cpufreq_policy_put_kobj(policy, notify);
        free_cpumask_var(policy->real_cpus);
        free_cpumask_var(policy->related_cpus);
        free_cpumask_var(policy->cpus);
        kfree(policy);
}
static int cpufreq_online(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        bool new_policy;
        unsigned long flags;
        unsigned int j;
        int ret;

        pr_debug("%s: bringing CPU%u online\n", __func__, cpu);

        /* Check if this CPU already has a policy to manage it */
        policy = per_cpu(cpufreq_cpu_data, cpu);
        if (policy) {
                WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
                if (!policy_is_inactive(policy))
                        return cpufreq_add_policy_cpu(policy, cpu);

                /* This is the only online CPU for the policy. Start over. */
                new_policy = false;
                down_write(&policy->rwsem);
                policy->cpu = cpu;
                policy->governor = NULL;
                up_write(&policy->rwsem);
        } else {
                new_policy = true;
                policy = cpufreq_policy_alloc(cpu);
                if (!policy)
                        return -ENOMEM;
        }

        cpumask_copy(policy->cpus, cpumask_of(cpu));

        /* call driver. From then on the cpufreq must be able
         * to accept all calls to ->verify and ->setpolicy for this CPU
         */
        ret = cpufreq_driver->init(policy);
        if (ret) {
                pr_debug("initialization failed\n");
                goto out_free_policy;
        }

        down_write(&policy->rwsem);

        if (new_policy) {
                /* related_cpus should at least include policy->cpus. */
                cpumask_copy(policy->related_cpus, policy->cpus);
                /* Remember CPUs present at the policy creation time. */
                cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);

                /* Name and add the kobject */
                ret = kobject_add(&policy->kobj, cpufreq_global_kobject,
                                  "policy%u",
                                  cpumask_first(policy->related_cpus));
                if (ret) {
                        pr_err("%s: failed to add policy->kobj: %d\n", __func__,
                               ret);
                        goto out_exit_policy;
                }
        }

        /*
         * affected cpus must always be the ones that are online. We aren't
         * managing offline cpus here.
         */
        cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

        if (new_policy) {
                policy->user_policy.min = policy->min;
                policy->user_policy.max = policy->max;

                write_lock_irqsave(&cpufreq_driver_lock, flags);
                for_each_cpu(j, policy->related_cpus)
                        per_cpu(cpufreq_cpu_data, j) = policy;
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
        }

        if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
                policy->cur = cpufreq_driver->get(policy->cpu);
                if (!policy->cur) {
                        pr_err("%s: ->get() failed\n", __func__);
                        goto out_exit_policy;
                }
        }

        /*
         * Sometimes boot loaders set the CPU frequency to a value outside of
         * the frequency table present with the cpufreq core. In such cases
         * the CPU might be unstable if it has to run on that frequency for a
         * long duration, so it is better to set it to a frequency which is
         * specified in the freq-table. This also makes cpufreq stats
         * inconsistent, as cpufreq-stats would fail to register because the
         * current frequency of the CPU isn't found in the freq-table.
         *
         * Because we don't want this change to affect the boot process badly,
         * we go for the next freq which is >= policy->cur ('cur' must be set
         * by now, otherwise we will end up setting freq to the lowest of the
         * table, as 'cur' is initialized to zero).
         *
         * We are passing target-freq as "policy->cur - 1" otherwise
         * __cpufreq_driver_target() would simply fail, as policy->cur will be
         * equal to target-freq.
         */
        if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
            && has_target()) {
                /* Are we running at unknown frequency ? */
                ret = cpufreq_frequency_table_get_index(policy, policy->cur);
                if (ret == -EINVAL) {
                        /* Warn user and fix it */
                        pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
                                __func__, policy->cpu, policy->cur);
                        ret = __cpufreq_driver_target(policy, policy->cur - 1,
                                CPUFREQ_RELATION_L);

                        /*
                         * Reaching here after boot in a few seconds may not
                         * mean that system will remain stable at "unknown"
                         * frequency for longer duration. Hence, a BUG_ON().
                         */
                        BUG_ON(ret);
                        pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
                                __func__, policy->cpu, policy->cur);
                }
        }

        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                     CPUFREQ_START, policy);

        if (new_policy) {
                ret = cpufreq_add_dev_interface(policy);
                if (ret)
                        goto out_exit_policy;
                blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                CPUFREQ_CREATE_POLICY, policy);

                write_lock_irqsave(&cpufreq_driver_lock, flags);
                list_add(&policy->policy_list, &cpufreq_policy_list);
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
        }

        ret = cpufreq_init_policy(policy);
        if (ret) {
                pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
                       __func__, cpu, ret);
                /* cpufreq_policy_free() will notify based on this */
                new_policy = false;
                goto out_exit_policy;
        }

        up_write(&policy->rwsem);

        kobject_uevent(&policy->kobj, KOBJ_ADD);

        /* Callback for handling stuff after policy is ready */
        if (cpufreq_driver->ready)
                cpufreq_driver->ready(policy);

        pr_debug("initialization complete\n");

        return 0;

out_exit_policy:
        up_write(&policy->rwsem);

        if (cpufreq_driver->exit)
                cpufreq_driver->exit(policy);
out_free_policy:
        cpufreq_policy_free(policy, !new_policy);
        return ret;
}
/**
 * cpufreq_add_dev - the cpufreq interface for a CPU device.
 * @dev: CPU device.
 * @sif: Subsystem interface structure pointer (not used)
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
        unsigned cpu = dev->id;
        int ret;

        dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);

        if (cpu_online(cpu)) {
                ret = cpufreq_online(cpu);
        } else {
                /*
                 * A hotplug notifier will follow and we will handle it as CPU
                 * online then. For now, just create the sysfs link, unless
                 * there is no policy or the link is already present.
                 */
                struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

                ret = policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
                        ? add_cpu_dev_symlink(policy, cpu) : 0;
        }

        return ret;
}
static void cpufreq_offline_prepare(unsigned int cpu)
{
        struct cpufreq_policy *policy;

        pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

        policy = cpufreq_cpu_get_raw(cpu);
        if (!policy) {
                pr_debug("%s: No cpu_data found\n", __func__);
                return;
        }

        if (has_target()) {
                int ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
                if (ret)
                        pr_err("%s: Failed to stop governor\n", __func__);
        }

        down_write(&policy->rwsem);
        cpumask_clear_cpu(cpu, policy->cpus);

        if (policy_is_inactive(policy)) {
                if (has_target())
                        strncpy(policy->last_governor, policy->governor->name,
                                CPUFREQ_NAME_LEN);
        } else if (cpu == policy->cpu) {
                /* Nominate new CPU */
                policy->cpu = cpumask_any(policy->cpus);
        }
        up_write(&policy->rwsem);

        /* Start governor again for active policy */
        if (!policy_is_inactive(policy)) {
                if (has_target()) {
                        int ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
                        if (!ret)
                                ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

                        if (ret)
                                pr_err("%s: Failed to start governor\n", __func__);
                }
        } else if (cpufreq_driver->stop_cpu) {
                cpufreq_driver->stop_cpu(policy);
        }
}

static void cpufreq_offline_finish(unsigned int cpu)
{
        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

        if (!policy) {
                pr_debug("%s: No cpu_data found\n", __func__);
                return;
        }

        /* Only proceed for inactive policies */
        if (!policy_is_inactive(policy))
                return;

        /* If cpu is last user of policy, free policy */
        if (has_target()) {
                int ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
                if (ret)
                        pr_err("%s: Failed to exit governor\n", __func__);
        }

        /*
         * Perform the ->exit() even during light-weight tear-down,
         * since this is a core component, and is essential for the
         * subsequent light-weight ->init() to succeed.
         */
        if (cpufreq_driver->exit) {
                cpufreq_driver->exit(policy);
                policy->freq_table = NULL;
        }
}

/**
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */
static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
        unsigned int cpu = dev->id;
        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

        if (!policy)
                return;

        if (cpu_online(cpu)) {
                cpufreq_offline_prepare(cpu);
                cpufreq_offline_finish(cpu);
        }

        cpumask_clear_cpu(cpu, policy->real_cpus);

        if (cpumask_empty(policy->real_cpus)) {
                cpufreq_policy_free(policy, true);
                return;
        }

        remove_cpu_dev_symlink(policy, cpu);
}
static void handle_update(struct work_struct *work)
{
        struct cpufreq_policy *policy =
                container_of(work, struct cpufreq_policy, update);
        unsigned int cpu = policy->cpu;
        pr_debug("handle_update for cpu %u called\n", cpu);
        cpufreq_update_policy(cpu);
}

/**
 *      cpufreq_out_of_sync - fix up things when the actual and saved CPU
 *      frequencies differ.
 *      @policy: policy managing CPUs
 *      @new_freq: CPU frequency the CPU actually runs at
 *
 *      We adjust to the current frequency first, and need to clean up later.
 *      So either call cpufreq_update_policy() or schedule handle_update().
 */
static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
                                unsigned int new_freq)
{
        struct cpufreq_freqs freqs;

        pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
                 policy->cur, new_freq);

        freqs.old = policy->cur;
        freqs.new = new_freq;

        cpufreq_freq_transition_begin(policy, &freqs);
        cpufreq_freq_transition_end(policy, &freqs, 0);
}
/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        unsigned int ret_freq = 0;

        if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
                return cpufreq_driver->get(cpu);

        policy = cpufreq_cpu_get(cpu);
        if (policy) {
                ret_freq = policy->cur;
                cpufreq_cpu_put(policy);
        }

        return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);

/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        unsigned int ret_freq = 0;

        if (policy) {
                ret_freq = policy->max;
                cpufreq_cpu_put(policy);
        }

        return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);

static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
        unsigned int ret_freq = 0;

        if (!cpufreq_driver->get)
                return ret_freq;

        ret_freq = cpufreq_driver->get(policy->cpu);

        /* Updating inactive policies is invalid, so avoid doing that. */
        if (unlikely(policy_is_inactive(policy)))
                return ret_freq;

        if (ret_freq && policy->cur &&
                !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
                /* verify that no discrepancy between the actual and
                   the saved value exists */
                if (unlikely(ret_freq != policy->cur)) {
                        cpufreq_out_of_sync(policy, ret_freq);
                        schedule_work(&policy->update);
                }
        }

        return ret_freq;
}
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        unsigned int ret_freq = 0;

        if (policy) {
                down_read(&policy->rwsem);
                ret_freq = __cpufreq_get(policy);
                up_read(&policy->rwsem);

                cpufreq_cpu_put(policy);
        }

        return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);

static struct subsys_interface cpufreq_interface = {
        .name           = "cpufreq",
        .subsys         = &cpu_subsys,
        .add_dev        = cpufreq_add_dev,
        .remove_dev     = cpufreq_remove_dev,
};
/*
 * In case the platform wants some specific frequency to be configured
 * during suspend.
 */
int cpufreq_generic_suspend(struct cpufreq_policy *policy)
{
        int ret;

        if (!policy->suspend_freq) {
                pr_debug("%s: suspend_freq not defined\n", __func__);
                return 0;
        }

        pr_debug("%s: Setting suspend-freq: %u\n", __func__,
                        policy->suspend_freq);

        ret = __cpufreq_driver_target(policy, policy->suspend_freq,
                        CPUFREQ_RELATION_H);
        if (ret)
                pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
                                __func__, policy->suspend_freq, ret);

        return ret;
}
EXPORT_SYMBOL(cpufreq_generic_suspend);

/**
 * cpufreq_suspend() - Suspend CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycles for suspending governors,
 * as some platforms can't change frequency after this point in the suspend
 * cycle. This is because some of the devices (like i2c, regulators, etc.)
 * used for changing the frequency are suspended quickly after this point.
 */
void cpufreq_suspend(void)
{
        struct cpufreq_policy *policy;

        if (!cpufreq_driver)
                return;

        if (!has_target())
                goto suspend;

        pr_debug("%s: Suspending Governors\n", __func__);

        for_each_active_policy(policy) {
                if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
                        pr_err("%s: Failed to stop governor for policy: %p\n",
                                __func__, policy);
                else if (cpufreq_driver->suspend
                    && cpufreq_driver->suspend(policy))
                        pr_err("%s: Failed to suspend driver: %p\n", __func__,
                                policy);
        }

suspend:
        cpufreq_suspended = true;
}
/**
 * cpufreq_resume() - Resume CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycle for resuming governors that
 * are suspended with cpufreq_suspend().
 */
void cpufreq_resume(void)
{
        struct cpufreq_policy *policy;

        if (!cpufreq_driver)
                return;

        cpufreq_suspended = false;

        if (!has_target())
                return;

        pr_debug("%s: Resuming Governors\n", __func__);

        for_each_active_policy(policy) {
                if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
                        pr_err("%s: Failed to resume driver: %p\n", __func__,
                                policy);
                else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
                    || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
                        pr_err("%s: Failed to start governor for policy: %p\n",
                                __func__, policy);
        }

        /*
         * Schedule a call to cpufreq_update_policy() for the first-online
         * CPU, as that one won't be hotplugged-out on suspend. It will verify
         * that the current freq is in sync with what we believe it to be.
         */
        policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
        if (WARN_ON(!policy))
                return;

        schedule_work(&policy->update);
}

/**
 * cpufreq_get_current_driver - return current driver's name
 *
 * Return the name string of the currently loaded cpufreq driver
 * or NULL, if none.
 */
const char *cpufreq_get_current_driver(void)
{
        if (cpufreq_driver)
                return cpufreq_driver->name;

        return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);

/**
 * cpufreq_get_driver_data - return current driver data
 *
 * Return the private data of the currently loaded cpufreq
 * driver, or NULL if no cpufreq driver is loaded.
 */
void *cpufreq_get_driver_data(void)
{
        if (cpufreq_driver)
                return cpufreq_driver->driver_data;

        return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 *      cpufreq_register_notifier - register a driver with cpufreq
 *      @nb: notifier function to register
 *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 *      Add a driver to one of two lists: either a list of drivers that
 *      are notified about clock rate changes (once before and once after
 *      the transition), or a list of drivers that are notified about
 *      changes in cpufreq policy.
 *
 *      This function may sleep, and has the same return conditions as
 *      blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
        int ret;

        if (cpufreq_disabled())
                return -EINVAL;

        WARN_ON(!init_cpufreq_transition_notifier_list_called);

        switch (list) {
        case CPUFREQ_TRANSITION_NOTIFIER:
                ret = srcu_notifier_chain_register(
                                &cpufreq_transition_notifier_list, nb);
                break;
        case CPUFREQ_POLICY_NOTIFIER:
                ret = blocking_notifier_chain_register(
                                &cpufreq_policy_notifier_list, nb);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
/**
 *      cpufreq_unregister_notifier - unregister a driver with cpufreq
 *      @nb: notifier block to be unregistered
 *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 *      Remove a driver from the CPU frequency notifier list.
 *
 *      This function may sleep, and has the same return conditions as
 *      blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
        int ret;

        if (cpufreq_disabled())
                return -EINVAL;

        switch (list) {
        case CPUFREQ_TRANSITION_NOTIFIER:
                ret = srcu_notifier_chain_unregister(
                                &cpufreq_transition_notifier_list, nb);
                break;
        case CPUFREQ_POLICY_NOTIFIER:
                ret = blocking_notifier_chain_unregister(
                                &cpufreq_policy_notifier_list, nb);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);
/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/

/* Must set freqs->new to intermediate frequency */
static int __target_intermediate(struct cpufreq_policy *policy,
                                 struct cpufreq_freqs *freqs, int index)
{
        int ret;

        freqs->new = cpufreq_driver->get_intermediate(policy, index);

        /* We don't need to switch to intermediate freq */
        if (!freqs->new)
                return 0;

        pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
                 __func__, policy->cpu, freqs->old, freqs->new);

        cpufreq_freq_transition_begin(policy, freqs);
        ret = cpufreq_driver->target_intermediate(policy, index);
        cpufreq_freq_transition_end(policy, freqs, ret);

        if (ret)
                pr_err("%s: Failed to change to intermediate frequency: %d\n",
                       __func__, ret);

        return ret;
}

static int __target_index(struct cpufreq_policy *policy,
                          struct cpufreq_frequency_table *freq_table, int index)
{
        struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
        unsigned int intermediate_freq = 0;
        int retval = -EINVAL;
        bool notify;

        notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
        if (notify) {
                /* Handle switching to intermediate frequency */
                if (cpufreq_driver->get_intermediate) {
                        retval = __target_intermediate(policy, &freqs, index);
                        if (retval)
                                return retval;

                        intermediate_freq = freqs.new;
                        /* Set old freq to intermediate */
                        if (intermediate_freq)
                                freqs.old = freqs.new;
                }

                freqs.new = freq_table[index].frequency;
                pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
                         __func__, policy->cpu, freqs.old, freqs.new);

                cpufreq_freq_transition_begin(policy, &freqs);
        }

        retval = cpufreq_driver->target_index(policy, index);
        if (retval)
                pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
                       retval);

        if (notify) {
                cpufreq_freq_transition_end(policy, &freqs, retval);

                /*
                 * Failed after setting to intermediate freq? Driver should have
                 * reverted back to initial frequency and so should we. Check
                 * here for intermediate_freq instead of get_intermediate, in
                 * case we haven't switched to intermediate freq at all.
                 */
                if (unlikely(retval && intermediate_freq)) {
                        freqs.old = intermediate_freq;
                        freqs.new = policy->restore_freq;
                        cpufreq_freq_transition_begin(policy, &freqs);
                        cpufreq_freq_transition_end(policy, &freqs, 0);
                }
        }

        return retval;
}
int __cpufreq_driver_target(struct cpufreq_policy *policy,
                            unsigned int target_freq,
                            unsigned int relation)
{
        unsigned int old_target_freq = target_freq;
        int retval = -EINVAL;

        if (cpufreq_disabled())
                return -ENODEV;

        /* Make sure that target_freq is within supported range */
        if (target_freq > policy->max)
                target_freq = policy->max;
        if (target_freq < policy->min)
                target_freq = policy->min;

        pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
                 policy->cpu, target_freq, relation, old_target_freq);

        /*
         * This might look like a redundant call as we are checking it again
         * after finding index. But it is left intentionally for cases where
         * exactly same freq is called again and so we can save on few function
         * calls.
         */
        if (target_freq == policy->cur)
                return 0;

        /* Save last value to restore later on errors */
        policy->restore_freq = policy->cur;

        if (cpufreq_driver->target)
                retval = cpufreq_driver->target(policy, target_freq, relation);
        else if (cpufreq_driver->target_index) {
                struct cpufreq_frequency_table *freq_table;
                int index;

                freq_table = cpufreq_frequency_get_table(policy->cpu);
                if (unlikely(!freq_table)) {
                        pr_err("%s: Unable to find freq_table\n", __func__);
                        goto out;
                }

                retval = cpufreq_frequency_table_target(policy, freq_table,
                                target_freq, relation, &index);
                if (unlikely(retval)) {
                        pr_err("%s: Unable to find matching freq\n", __func__);
                        goto out;
                }

                if (freq_table[index].frequency == policy->cur) {
                        retval = 0;
                        goto out;
                }

                retval = __target_index(policy, freq_table, index);
        }

out:
        return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);

int cpufreq_driver_target(struct cpufreq_policy *policy,
                          unsigned int target_freq,
                          unsigned int relation)
{
        int ret = -EINVAL;

        down_write(&policy->rwsem);

        ret = __cpufreq_driver_target(policy, target_freq, relation);

        up_write(&policy->rwsem);

        return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
static int __cpufreq_governor(struct cpufreq_policy *policy,
                                        unsigned int event)
{
        int ret;

        /* Only must be defined when default governor is known to have latency
           restrictions, like e.g. conservative or ondemand.
           That this is the case is already ensured in Kconfig
        */
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
        struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
        struct cpufreq_governor *gov = NULL;
#endif

        /* Don't start any governor operations if we are entering suspend */
        if (cpufreq_suspended)
                return 0;
        /*
         * Governor might not be initiated here if ACPI _PPC changed
         * notification happened, so check it.
         */
        if (!policy->governor)
                return -EINVAL;

        if (policy->governor->max_transition_latency &&
            policy->cpuinfo.transition_latency >
            policy->governor->max_transition_latency) {
                if (!gov)
                        return -EINVAL;
                else {
                        pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
                                policy->governor->name, gov->name);
                        policy->governor = gov;
                }
        }

        if (event == CPUFREQ_GOV_POLICY_INIT)
                if (!try_module_get(policy->governor->owner))
                        return -EINVAL;

        pr_debug("%s: for CPU %u, event %u\n", __func__, policy->cpu, event);

        mutex_lock(&cpufreq_governor_lock);
        if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
            || (!policy->governor_enabled
            && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
                mutex_unlock(&cpufreq_governor_lock);
                return -EBUSY;
        }

        if (event == CPUFREQ_GOV_STOP)
                policy->governor_enabled = false;
        else if (event == CPUFREQ_GOV_START)
                policy->governor_enabled = true;

        mutex_unlock(&cpufreq_governor_lock);

        ret = policy->governor->governor(policy, event);

        if (!ret) {
                if (event == CPUFREQ_GOV_POLICY_INIT)
                        policy->governor->initialized++;
                else if (event == CPUFREQ_GOV_POLICY_EXIT)
                        policy->governor->initialized--;
        } else {
                /* Restore original values */
                mutex_lock(&cpufreq_governor_lock);
                if (event == CPUFREQ_GOV_STOP)
                        policy->governor_enabled = true;
                else if (event == CPUFREQ_GOV_START)
                        policy->governor_enabled = false;
                mutex_unlock(&cpufreq_governor_lock);
        }

        if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
                        ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
                module_put(policy->governor->owner);

        return ret;
}
int cpufreq_register_governor(struct cpufreq_governor *governor)
{
        int err;

        if (!governor)
                return -EINVAL;

        if (cpufreq_disabled())
                return -ENODEV;

        mutex_lock(&cpufreq_governor_mutex);

        governor->initialized = 0;
        err = -EBUSY;
        if (!find_governor(governor->name)) {
                err = 0;
                list_add(&governor->governor_list, &cpufreq_governor_list);
        }

        mutex_unlock(&cpufreq_governor_mutex);
        return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
        struct cpufreq_policy *policy;
        unsigned long flags;

        if (!governor)
                return;

        if (cpufreq_disabled())
                return;

        /* clear last_governor for all inactive policies */
        read_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_inactive_policy(policy) {
                if (!strcmp(policy->last_governor, governor->name)) {
                        policy->governor = NULL;
                        strcpy(policy->last_governor, "\0");
                }
        }
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        mutex_lock(&cpufreq_governor_mutex);
        list_del(&governor->governor_list);
        mutex_unlock(&cpufreq_governor_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *      is written
 * @cpu: CPU to find the policy for
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
        struct cpufreq_policy *cpu_policy;
        if (!policy)
                return -EINVAL;

        cpu_policy = cpufreq_cpu_get(cpu);
        if (!cpu_policy)
                return -EINVAL;

        memcpy(policy, cpu_policy, sizeof(*policy));

        cpufreq_cpu_put(cpu_policy);
        return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
/*
 * policy : current policy.
 * new_policy: policy to be set.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
                                struct cpufreq_policy *new_policy)
{
        struct cpufreq_governor *old_gov;
        int ret;

        pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
                 new_policy->cpu, new_policy->min, new_policy->max);

        memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

        /*
         * This check works well when we store new min/max freq attributes,
         * because new_policy is a copy of policy with one field updated.
         */
        if (new_policy->min > new_policy->max)
                return -EINVAL;

        /* verify the cpu speed can be set within this limit */
        ret = cpufreq_driver->verify(new_policy);
        if (ret)
                return ret;

        /* adjust if necessary - all reasons */
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                        CPUFREQ_ADJUST, new_policy);

        /*
         * verify the cpu speed can be set within this limit, which might be
         * different to the first one
         */
        ret = cpufreq_driver->verify(new_policy);
        if (ret)
                return ret;

        /* notification of the new policy */
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                        CPUFREQ_NOTIFY, new_policy);

        policy->min = new_policy->min;
        policy->max = new_policy->max;

        pr_debug("new min and max freqs are %u - %u kHz\n",
                 policy->min, policy->max);

        if (cpufreq_driver->setpolicy) {
                policy->policy = new_policy->policy;
                pr_debug("setting range\n");
                return cpufreq_driver->setpolicy(new_policy);
        }

        if (new_policy->governor == policy->governor)
                goto out;

        pr_debug("governor switch\n");

        /* save old, working values */
        old_gov = policy->governor;
        /* end old governor */
        if (old_gov) {
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
                if (ret) {
                        /* This can happen due to race with other operations */
                        pr_debug("%s: Failed to Stop Governor: %s (%d)\n",
                                 __func__, old_gov->name, ret);
                        return ret;
                }

                up_write(&policy->rwsem);
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
                down_write(&policy->rwsem);

                if (ret) {
                        pr_err("%s: Failed to Exit Governor: %s (%d)\n",
                               __func__, old_gov->name, ret);
                        return ret;
                }
        }

        /* start new governor */
        policy->governor = new_policy->governor;
        ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
        if (!ret) {
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
                if (!ret)
                        goto out;

                up_write(&policy->rwsem);
                __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
                down_write(&policy->rwsem);
        }

        /* new governor failed, so re-start old one */
        pr_debug("starting governor %s failed\n", policy->governor->name);
        if (old_gov) {
                policy->governor = old_gov;
                if (__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
                        policy->governor = NULL;
                else
                        __cpufreq_governor(policy, CPUFREQ_GOV_START);
        }

        return ret;

out:
        pr_debug("governor: change or update limits\n");
        return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
}
/**
 *      cpufreq_update_policy - re-evaluate an existing cpufreq policy
 *      @cpu: CPU which shall be re-evaluated
 *
 *      Useful for policy notifiers which have different necessities
 *      at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        struct cpufreq_policy new_policy;
        int ret;

        if (!policy)
                return -ENODEV;

        down_write(&policy->rwsem);

        pr_debug("updating policy for CPU %u\n", cpu);
        memcpy(&new_policy, policy, sizeof(*policy));
        new_policy.min = policy->user_policy.min;
        new_policy.max = policy->user_policy.max;

        /*
         * BIOS might change freq behind our back
         * -> ask driver for current freq and notify governors about a change
         */
        if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
                new_policy.cur = cpufreq_driver->get(cpu);
                if (WARN_ON(!new_policy.cur)) {
                        ret = -EIO;
                        goto unlock;
                }

                if (!policy->cur) {
                        pr_debug("Driver did not initialize current freq\n");
                        policy->cur = new_policy.cur;
                } else {
                        if (policy->cur != new_policy.cur && has_target())
                                cpufreq_out_of_sync(policy, new_policy.cur);
                }
        }

        ret = cpufreq_set_policy(policy, &new_policy);

unlock:
        up_write(&policy->rwsem);

        cpufreq_cpu_put(policy);
        return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
static int cpufreq_cpu_callback(struct notifier_block *nfb,
                                        unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
                cpufreq_online(cpu);
                break;

        case CPU_DOWN_PREPARE:
                cpufreq_offline_prepare(cpu);
                break;

        case CPU_POST_DEAD:
                cpufreq_offline_finish(cpu);
                break;

        case CPU_DOWN_FAILED:
                cpufreq_online(cpu);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __refdata cpufreq_cpu_notifier = {
        .notifier_call = cpufreq_cpu_callback,
};
/*********************************************************************
 *                              BOOST                                *
 *********************************************************************/
static int cpufreq_boost_set_sw(int state)
{
        struct cpufreq_frequency_table *freq_table;
        struct cpufreq_policy *policy;
        int ret = -EINVAL;

        for_each_active_policy(policy) {
                freq_table = cpufreq_frequency_get_table(policy->cpu);
                if (freq_table) {
                        ret = cpufreq_frequency_table_cpuinfo(policy,
                                                        freq_table);
                        if (ret) {
                                pr_err("%s: Policy frequency update failed\n",
                                       __func__);
                                break;
                        }
                        policy->user_policy.max = policy->max;
                        __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
                }
        }

        return ret;
}

int cpufreq_boost_trigger_state(int state)
{
        unsigned long flags;
        int ret = 0;

        if (cpufreq_driver->boost_enabled == state)
                return 0;

        write_lock_irqsave(&cpufreq_driver_lock, flags);
        cpufreq_driver->boost_enabled = state;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        ret = cpufreq_driver->set_boost(state);
        if (ret) {
                write_lock_irqsave(&cpufreq_driver_lock, flags);
                cpufreq_driver->boost_enabled = !state;
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);

                pr_err("%s: Cannot %s BOOST\n",
                       __func__, state ? "enable" : "disable");
        }

        return ret;
}

int cpufreq_boost_supported(void)
{
        if (likely(cpufreq_driver))
                return cpufreq_driver->boost_supported;

        return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_supported);

static int create_boost_sysfs_file(void)
{
        int ret;

        if (!cpufreq_boost_supported())
                return 0;

        /*
         * Check if driver provides function to enable boost -
         * if not, use cpufreq_boost_set_sw as default
         */
        if (!cpufreq_driver->set_boost)
                cpufreq_driver->set_boost = cpufreq_boost_set_sw;

        ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
        if (ret)
                pr_err("%s: cannot register global BOOST sysfs file\n",
                       __func__);

        return ret;
}

static void remove_boost_sysfs_file(void)
{
        if (cpufreq_boost_supported())
                sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
}

int cpufreq_enable_boost_support(void)
{
        if (!cpufreq_driver)
                return -EINVAL;

        if (cpufreq_boost_supported())
                return 0;

        cpufreq_driver->boost_supported = true;

        /* This will get removed on driver unregister */
        return create_boost_sysfs_file();
}
EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);

int cpufreq_boost_enabled(void)
{
        return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
        unsigned long flags;
        int ret;

        if (cpufreq_disabled())
                return -ENODEV;

        if (!driver_data || !driver_data->verify || !driver_data->init ||
            !(driver_data->setpolicy || driver_data->target_index ||
                    driver_data->target) ||
             (driver_data->setpolicy && (driver_data->target_index ||
                    driver_data->target)) ||
             (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
                return -EINVAL;

        pr_debug("trying to register driver %s\n", driver_data->name);

        /* Protect against concurrent CPU online/offline. */
        get_online_cpus();

        write_lock_irqsave(&cpufreq_driver_lock, flags);
        if (cpufreq_driver) {
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
                ret = -EEXIST;
                goto out;
        }
        cpufreq_driver = driver_data;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        if (driver_data->setpolicy)
                driver_data->flags |= CPUFREQ_CONST_LOOPS;

        ret = create_boost_sysfs_file();
        if (ret)
                goto err_null_driver;

        ret = subsys_interface_register(&cpufreq_interface);
        if (ret)
                goto err_boost_unreg;

        if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
            list_empty(&cpufreq_policy_list)) {
                /* if all ->init() calls failed, unregister */
                ret = -ENODEV;
                pr_debug("%s: No CPU initialized for driver %s\n", __func__,
                         driver_data->name);
                goto err_if_unreg;
        }

        register_hotcpu_notifier(&cpufreq_cpu_notifier);
        pr_debug("driver %s up and running\n", driver_data->name);

out:
        put_online_cpus();
        return ret;

err_if_unreg:
        subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
        remove_boost_sysfs_file();
err_null_driver:
        write_lock_irqsave(&cpufreq_driver_lock, flags);
        cpufreq_driver = NULL;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
        goto out;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
        unsigned long flags;

        if (!cpufreq_driver || (driver != cpufreq_driver))
                return -EINVAL;

        pr_debug("unregistering driver %s\n", driver->name);

        /* Protect against concurrent cpu hotplug */
        get_online_cpus();
        subsys_interface_unregister(&cpufreq_interface);
        remove_boost_sysfs_file();
        unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

        write_lock_irqsave(&cpufreq_driver_lock, flags);

        cpufreq_driver = NULL;

        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
        put_online_cpus();

        return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);

/*
 * Stop cpufreq at shutdown to make sure it isn't holding any locks
 * or mutexes when secondary CPUs are halted.
 */
static struct syscore_ops cpufreq_syscore_ops = {
        .shutdown = cpufreq_suspend,
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

static int __init cpufreq_core_init(void)
{
        if (cpufreq_disabled())
                return -ENODEV;

        cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
        BUG_ON(!cpufreq_global_kobject);

        register_syscore_ops(&cpufreq_syscore_ops);

        return 0;
}
core_initcall(cpufreq_core_init);