bus: subsys: update return type of ->remove_dev() to void
[deliverable/linux.git] / drivers / cpufreq / cpufreq.c
1 /*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
7 *
8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
9 * Added handling for CPU hotplug
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
16 */
17
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20 #include <linux/cpu.h>
21 #include <linux/cpufreq.h>
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/init.h>
25 #include <linux/kernel_stat.h>
26 #include <linux/module.h>
27 #include <linux/mutex.h>
28 #include <linux/slab.h>
29 #include <linux/suspend.h>
30 #include <linux/syscore_ops.h>
31 #include <linux/tick.h>
32 #include <trace/events/power.h>
33
34 static LIST_HEAD(cpufreq_policy_list);
35
36 static inline bool policy_is_inactive(struct cpufreq_policy *policy)
37 {
38 return cpumask_empty(policy->cpus);
39 }
40
41 static bool suitable_policy(struct cpufreq_policy *policy, bool active)
42 {
43 return active == !policy_is_inactive(policy);
44 }
45
46 /* Finds Next Acive/Inactive policy */
47 static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
48 bool active)
49 {
50 do {
51 policy = list_next_entry(policy, policy_list);
52
53 /* No more policies in the list */
54 if (&policy->policy_list == &cpufreq_policy_list)
55 return NULL;
56 } while (!suitable_policy(policy, active));
57
58 return policy;
59 }
60
61 static struct cpufreq_policy *first_policy(bool active)
62 {
63 struct cpufreq_policy *policy;
64
65 /* No policies in the list */
66 if (list_empty(&cpufreq_policy_list))
67 return NULL;
68
69 policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
70 policy_list);
71
72 if (!suitable_policy(policy, active))
73 policy = next_policy(policy, active);
74
75 return policy;
76 }
77
78 /* Macros to iterate over CPU policies */
79 #define for_each_suitable_policy(__policy, __active) \
80 for (__policy = first_policy(__active); \
81 __policy; \
82 __policy = next_policy(__policy, __active))
83
84 #define for_each_active_policy(__policy) \
85 for_each_suitable_policy(__policy, true)
86 #define for_each_inactive_policy(__policy) \
87 for_each_suitable_policy(__policy, false)
88
89 #define for_each_policy(__policy) \
90 list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)
91
92 /* Iterate over governors */
93 static LIST_HEAD(cpufreq_governor_list);
94 #define for_each_governor(__governor) \
95 list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
96
97 /**
98 * The "cpufreq driver" - the arch- or hardware-dependent low
99 * level driver of CPUFreq support, and its spinlock. This lock
100 * also protects the cpufreq_cpu_data array.
101 */
102 static struct cpufreq_driver *cpufreq_driver;
103 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
104 static DEFINE_RWLOCK(cpufreq_driver_lock);
105 DEFINE_MUTEX(cpufreq_governor_lock);
106
107 /* Flag to suspend/resume CPUFreq governors */
108 static bool cpufreq_suspended;
109
110 static inline bool has_target(void)
111 {
112 return cpufreq_driver->target_index || cpufreq_driver->target;
113 }
114
115 /*
116 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
117 * sections
118 */
119 static DECLARE_RWSEM(cpufreq_rwsem);
120
121 /* internal prototypes */
122 static int __cpufreq_governor(struct cpufreq_policy *policy,
123 unsigned int event);
124 static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
125 static void handle_update(struct work_struct *work);
126
127 /**
128 * Two notifier lists: the "policy" list is involved in the
129 * validation process for a new CPU frequency policy; the
130 * "transition" list for kernel code that needs to handle
131 * changes to devices when the CPU clock speed changes.
132 * The mutex locks both lists.
133 */
134 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
135 static struct srcu_notifier_head cpufreq_transition_notifier_list;
136
137 static bool init_cpufreq_transition_notifier_list_called;
138 static int __init init_cpufreq_transition_notifier_list(void)
139 {
140 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
141 init_cpufreq_transition_notifier_list_called = true;
142 return 0;
143 }
144 pure_initcall(init_cpufreq_transition_notifier_list);
145
146 static int off __read_mostly;
147 static int cpufreq_disabled(void)
148 {
149 return off;
150 }
151 void disable_cpufreq(void)
152 {
153 off = 1;
154 }
155 static DEFINE_MUTEX(cpufreq_governor_mutex);
156
157 bool have_governor_per_policy(void)
158 {
159 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
160 }
161 EXPORT_SYMBOL_GPL(have_governor_per_policy);
162
163 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
164 {
165 if (have_governor_per_policy())
166 return &policy->kobj;
167 else
168 return cpufreq_global_kobject;
169 }
170 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
171
172 struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
173 {
174 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
175
176 return policy && !policy_is_inactive(policy) ?
177 policy->freq_table : NULL;
178 }
179 EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
180
181 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
182 {
183 u64 idle_time;
184 u64 cur_wall_time;
185 u64 busy_time;
186
187 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
188
189 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
190 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
191 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
192 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
193 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
194 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
195
196 idle_time = cur_wall_time - busy_time;
197 if (wall)
198 *wall = cputime_to_usecs(cur_wall_time);
199
200 return cputime_to_usecs(idle_time);
201 }
202
203 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
204 {
205 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
206
207 if (idle_time == -1ULL)
208 return get_cpu_idle_time_jiffy(cpu, wall);
209 else if (!io_busy)
210 idle_time += get_cpu_iowait_time_us(cpu, wall);
211
212 return idle_time;
213 }
214 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
215
216 /*
217 * This is a generic cpufreq init() routine which can be used by cpufreq
218 * drivers of SMP systems. It will do following:
219 * - validate & show freq table passed
220 * - set policies transition latency
221 * - policy->cpus with all possible CPUs
222 */
223 int cpufreq_generic_init(struct cpufreq_policy *policy,
224 struct cpufreq_frequency_table *table,
225 unsigned int transition_latency)
226 {
227 int ret;
228
229 ret = cpufreq_table_validate_and_show(policy, table);
230 if (ret) {
231 pr_err("%s: invalid frequency table: %d\n", __func__, ret);
232 return ret;
233 }
234
235 policy->cpuinfo.transition_latency = transition_latency;
236
237 /*
238 * The driver only supports the SMP configuration where all processors
239 * share the clock and voltage and clock.
240 */
241 cpumask_setall(policy->cpus);
242
243 return 0;
244 }
245 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
246
247 /* Only for cpufreq core internal use */
248 struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
249 {
250 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
251
252 return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
253 }
254
255 unsigned int cpufreq_generic_get(unsigned int cpu)
256 {
257 struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
258
259 if (!policy || IS_ERR(policy->clk)) {
260 pr_err("%s: No %s associated to cpu: %d\n",
261 __func__, policy ? "clk" : "policy", cpu);
262 return 0;
263 }
264
265 return clk_get_rate(policy->clk) / 1000;
266 }
267 EXPORT_SYMBOL_GPL(cpufreq_generic_get);
268
269 /**
270 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
271 *
272 * @cpu: cpu to find policy for.
273 *
274 * This returns policy for 'cpu', returns NULL if it doesn't exist.
275 * It also increments the kobject reference count to mark it busy and so would
276 * require a corresponding call to cpufreq_cpu_put() to decrement it back.
277 * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be
278 * freed as that depends on the kobj count.
279 *
280 * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a
281 * valid policy is found. This is done to make sure the driver doesn't get
282 * unregistered while the policy is being used.
283 *
284 * Return: A valid policy on success, otherwise NULL on failure.
285 */
286 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
287 {
288 struct cpufreq_policy *policy = NULL;
289 unsigned long flags;
290
291 if (WARN_ON(cpu >= nr_cpu_ids))
292 return NULL;
293
294 if (!down_read_trylock(&cpufreq_rwsem))
295 return NULL;
296
297 /* get the cpufreq driver */
298 read_lock_irqsave(&cpufreq_driver_lock, flags);
299
300 if (cpufreq_driver) {
301 /* get the CPU */
302 policy = cpufreq_cpu_get_raw(cpu);
303 if (policy)
304 kobject_get(&policy->kobj);
305 }
306
307 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
308
309 if (!policy)
310 up_read(&cpufreq_rwsem);
311
312 return policy;
313 }
314 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
315
316 /**
317 * cpufreq_cpu_put: Decrements the usage count of a policy
318 *
319 * @policy: policy earlier returned by cpufreq_cpu_get().
320 *
321 * This decrements the kobject reference count incremented earlier by calling
322 * cpufreq_cpu_get().
323 *
324 * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get().
325 */
326 void cpufreq_cpu_put(struct cpufreq_policy *policy)
327 {
328 kobject_put(&policy->kobj);
329 up_read(&cpufreq_rwsem);
330 }
331 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
332
333 /*********************************************************************
334 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
335 *********************************************************************/
336
337 /**
338 * adjust_jiffies - adjust the system "loops_per_jiffy"
339 *
340 * This function alters the system "loops_per_jiffy" for the clock
341 * speed change. Note that loops_per_jiffy cannot be updated on SMP
342 * systems as each CPU might be scaled differently. So, use the arch
343 * per-CPU loops_per_jiffy value wherever possible.
344 */
345 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
346 {
347 #ifndef CONFIG_SMP
348 static unsigned long l_p_j_ref;
349 static unsigned int l_p_j_ref_freq;
350
351 if (ci->flags & CPUFREQ_CONST_LOOPS)
352 return;
353
354 if (!l_p_j_ref_freq) {
355 l_p_j_ref = loops_per_jiffy;
356 l_p_j_ref_freq = ci->old;
357 pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
358 l_p_j_ref, l_p_j_ref_freq);
359 }
360 if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
361 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
362 ci->new);
363 pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
364 loops_per_jiffy, ci->new);
365 }
366 #endif
367 }
368
369 static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
370 struct cpufreq_freqs *freqs, unsigned int state)
371 {
372 BUG_ON(irqs_disabled());
373
374 if (cpufreq_disabled())
375 return;
376
377 freqs->flags = cpufreq_driver->flags;
378 pr_debug("notification %u of frequency transition to %u kHz\n",
379 state, freqs->new);
380
381 switch (state) {
382
383 case CPUFREQ_PRECHANGE:
384 /* detect if the driver reported a value as "old frequency"
385 * which is not equal to what the cpufreq core thinks is
386 * "old frequency".
387 */
388 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
389 if ((policy) && (policy->cpu == freqs->cpu) &&
390 (policy->cur) && (policy->cur != freqs->old)) {
391 pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
392 freqs->old, policy->cur);
393 freqs->old = policy->cur;
394 }
395 }
396 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
397 CPUFREQ_PRECHANGE, freqs);
398 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
399 break;
400
401 case CPUFREQ_POSTCHANGE:
402 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
403 pr_debug("FREQ: %lu - CPU: %lu\n",
404 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
405 trace_cpu_frequency(freqs->new, freqs->cpu);
406 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
407 CPUFREQ_POSTCHANGE, freqs);
408 if (likely(policy) && likely(policy->cpu == freqs->cpu))
409 policy->cur = freqs->new;
410 break;
411 }
412 }
413
414 /**
415 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
416 * on frequency transition.
417 *
418 * This function calls the transition notifiers and the "adjust_jiffies"
419 * function. It is called twice on all CPU frequency changes that have
420 * external effects.
421 */
422 static void cpufreq_notify_transition(struct cpufreq_policy *policy,
423 struct cpufreq_freqs *freqs, unsigned int state)
424 {
425 for_each_cpu(freqs->cpu, policy->cpus)
426 __cpufreq_notify_transition(policy, freqs, state);
427 }
428
429 /* Do post notifications when there are chances that transition has failed */
430 static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
431 struct cpufreq_freqs *freqs, int transition_failed)
432 {
433 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
434 if (!transition_failed)
435 return;
436
437 swap(freqs->old, freqs->new);
438 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
439 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
440 }
441
442 void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
443 struct cpufreq_freqs *freqs)
444 {
445
446 /*
447 * Catch double invocations of _begin() which lead to self-deadlock.
448 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
449 * doesn't invoke _begin() on their behalf, and hence the chances of
450 * double invocations are very low. Moreover, there are scenarios
451 * where these checks can emit false-positive warnings in these
452 * drivers; so we avoid that by skipping them altogether.
453 */
454 WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
455 && current == policy->transition_task);
456
457 wait:
458 wait_event(policy->transition_wait, !policy->transition_ongoing);
459
460 spin_lock(&policy->transition_lock);
461
462 if (unlikely(policy->transition_ongoing)) {
463 spin_unlock(&policy->transition_lock);
464 goto wait;
465 }
466
467 policy->transition_ongoing = true;
468 policy->transition_task = current;
469
470 spin_unlock(&policy->transition_lock);
471
472 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
473 }
474 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
475
476 void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
477 struct cpufreq_freqs *freqs, int transition_failed)
478 {
479 if (unlikely(WARN_ON(!policy->transition_ongoing)))
480 return;
481
482 cpufreq_notify_post_transition(policy, freqs, transition_failed);
483
484 policy->transition_ongoing = false;
485 policy->transition_task = NULL;
486
487 wake_up(&policy->transition_wait);
488 }
489 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
490
491
492 /*********************************************************************
493 * SYSFS INTERFACE *
494 *********************************************************************/
495 static ssize_t show_boost(struct kobject *kobj,
496 struct attribute *attr, char *buf)
497 {
498 return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
499 }
500
501 static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
502 const char *buf, size_t count)
503 {
504 int ret, enable;
505
506 ret = sscanf(buf, "%d", &enable);
507 if (ret != 1 || enable < 0 || enable > 1)
508 return -EINVAL;
509
510 if (cpufreq_boost_trigger_state(enable)) {
511 pr_err("%s: Cannot %s BOOST!\n",
512 __func__, enable ? "enable" : "disable");
513 return -EINVAL;
514 }
515
516 pr_debug("%s: cpufreq BOOST %s\n",
517 __func__, enable ? "enabled" : "disabled");
518
519 return count;
520 }
521 define_one_global_rw(boost);
522
523 static struct cpufreq_governor *find_governor(const char *str_governor)
524 {
525 struct cpufreq_governor *t;
526
527 for_each_governor(t)
528 if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
529 return t;
530
531 return NULL;
532 }
533
534 /**
535 * cpufreq_parse_governor - parse a governor string
536 */
537 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
538 struct cpufreq_governor **governor)
539 {
540 int err = -EINVAL;
541
542 if (!cpufreq_driver)
543 goto out;
544
545 if (cpufreq_driver->setpolicy) {
546 if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
547 *policy = CPUFREQ_POLICY_PERFORMANCE;
548 err = 0;
549 } else if (!strncasecmp(str_governor, "powersave",
550 CPUFREQ_NAME_LEN)) {
551 *policy = CPUFREQ_POLICY_POWERSAVE;
552 err = 0;
553 }
554 } else {
555 struct cpufreq_governor *t;
556
557 mutex_lock(&cpufreq_governor_mutex);
558
559 t = find_governor(str_governor);
560
561 if (t == NULL) {
562 int ret;
563
564 mutex_unlock(&cpufreq_governor_mutex);
565 ret = request_module("cpufreq_%s", str_governor);
566 mutex_lock(&cpufreq_governor_mutex);
567
568 if (ret == 0)
569 t = find_governor(str_governor);
570 }
571
572 if (t != NULL) {
573 *governor = t;
574 err = 0;
575 }
576
577 mutex_unlock(&cpufreq_governor_mutex);
578 }
579 out:
580 return err;
581 }
582
583 /**
584 * cpufreq_per_cpu_attr_read() / show_##file_name() -
585 * print out cpufreq information
586 *
587 * Write out information from cpufreq_driver->policy[cpu]; object must be
588 * "unsigned int".
589 */
590
591 #define show_one(file_name, object) \
592 static ssize_t show_##file_name \
593 (struct cpufreq_policy *policy, char *buf) \
594 { \
595 return sprintf(buf, "%u\n", policy->object); \
596 }
597
598 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
599 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
600 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
601 show_one(scaling_min_freq, min);
602 show_one(scaling_max_freq, max);
603
604 static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
605 {
606 ssize_t ret;
607
608 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
609 ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
610 else
611 ret = sprintf(buf, "%u\n", policy->cur);
612 return ret;
613 }
614
615 static int cpufreq_set_policy(struct cpufreq_policy *policy,
616 struct cpufreq_policy *new_policy);
617
618 /**
619 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
620 */
621 #define store_one(file_name, object) \
622 static ssize_t store_##file_name \
623 (struct cpufreq_policy *policy, const char *buf, size_t count) \
624 { \
625 int ret, temp; \
626 struct cpufreq_policy new_policy; \
627 \
628 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
629 if (ret) \
630 return -EINVAL; \
631 \
632 ret = sscanf(buf, "%u", &new_policy.object); \
633 if (ret != 1) \
634 return -EINVAL; \
635 \
636 temp = new_policy.object; \
637 ret = cpufreq_set_policy(policy, &new_policy); \
638 if (!ret) \
639 policy->user_policy.object = temp; \
640 \
641 return ret ? ret : count; \
642 }
643
644 store_one(scaling_min_freq, min);
645 store_one(scaling_max_freq, max);
646
647 /**
648 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
649 */
650 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
651 char *buf)
652 {
653 unsigned int cur_freq = __cpufreq_get(policy);
654 if (!cur_freq)
655 return sprintf(buf, "<unknown>");
656 return sprintf(buf, "%u\n", cur_freq);
657 }
658
659 /**
660 * show_scaling_governor - show the current policy for the specified CPU
661 */
662 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
663 {
664 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
665 return sprintf(buf, "powersave\n");
666 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
667 return sprintf(buf, "performance\n");
668 else if (policy->governor)
669 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
670 policy->governor->name);
671 return -EINVAL;
672 }
673
674 /**
675 * store_scaling_governor - store policy for the specified CPU
676 */
677 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
678 const char *buf, size_t count)
679 {
680 int ret;
681 char str_governor[16];
682 struct cpufreq_policy new_policy;
683
684 ret = cpufreq_get_policy(&new_policy, policy->cpu);
685 if (ret)
686 return ret;
687
688 ret = sscanf(buf, "%15s", str_governor);
689 if (ret != 1)
690 return -EINVAL;
691
692 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
693 &new_policy.governor))
694 return -EINVAL;
695
696 ret = cpufreq_set_policy(policy, &new_policy);
697
698 policy->user_policy.policy = policy->policy;
699 policy->user_policy.governor = policy->governor;
700
701 if (ret)
702 return ret;
703 else
704 return count;
705 }
706
707 /**
708 * show_scaling_driver - show the cpufreq driver currently loaded
709 */
710 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
711 {
712 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
713 }
714
715 /**
716 * show_scaling_available_governors - show the available CPUfreq governors
717 */
718 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
719 char *buf)
720 {
721 ssize_t i = 0;
722 struct cpufreq_governor *t;
723
724 if (!has_target()) {
725 i += sprintf(buf, "performance powersave");
726 goto out;
727 }
728
729 for_each_governor(t) {
730 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
731 - (CPUFREQ_NAME_LEN + 2)))
732 goto out;
733 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
734 }
735 out:
736 i += sprintf(&buf[i], "\n");
737 return i;
738 }
739
740 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
741 {
742 ssize_t i = 0;
743 unsigned int cpu;
744
745 for_each_cpu(cpu, mask) {
746 if (i)
747 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
748 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
749 if (i >= (PAGE_SIZE - 5))
750 break;
751 }
752 i += sprintf(&buf[i], "\n");
753 return i;
754 }
755 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
756
757 /**
758 * show_related_cpus - show the CPUs affected by each transition even if
759 * hw coordination is in use
760 */
761 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
762 {
763 return cpufreq_show_cpus(policy->related_cpus, buf);
764 }
765
766 /**
767 * show_affected_cpus - show the CPUs affected by each transition
768 */
769 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
770 {
771 return cpufreq_show_cpus(policy->cpus, buf);
772 }
773
774 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
775 const char *buf, size_t count)
776 {
777 unsigned int freq = 0;
778 unsigned int ret;
779
780 if (!policy->governor || !policy->governor->store_setspeed)
781 return -EINVAL;
782
783 ret = sscanf(buf, "%u", &freq);
784 if (ret != 1)
785 return -EINVAL;
786
787 policy->governor->store_setspeed(policy, freq);
788
789 return count;
790 }
791
792 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
793 {
794 if (!policy->governor || !policy->governor->show_setspeed)
795 return sprintf(buf, "<unsupported>\n");
796
797 return policy->governor->show_setspeed(policy, buf);
798 }
799
800 /**
801 * show_bios_limit - show the current cpufreq HW/BIOS limitation
802 */
803 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
804 {
805 unsigned int limit;
806 int ret;
807 if (cpufreq_driver->bios_limit) {
808 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
809 if (!ret)
810 return sprintf(buf, "%u\n", limit);
811 }
812 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
813 }
814
815 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
816 cpufreq_freq_attr_ro(cpuinfo_min_freq);
817 cpufreq_freq_attr_ro(cpuinfo_max_freq);
818 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
819 cpufreq_freq_attr_ro(scaling_available_governors);
820 cpufreq_freq_attr_ro(scaling_driver);
821 cpufreq_freq_attr_ro(scaling_cur_freq);
822 cpufreq_freq_attr_ro(bios_limit);
823 cpufreq_freq_attr_ro(related_cpus);
824 cpufreq_freq_attr_ro(affected_cpus);
825 cpufreq_freq_attr_rw(scaling_min_freq);
826 cpufreq_freq_attr_rw(scaling_max_freq);
827 cpufreq_freq_attr_rw(scaling_governor);
828 cpufreq_freq_attr_rw(scaling_setspeed);
829
830 static struct attribute *default_attrs[] = {
831 &cpuinfo_min_freq.attr,
832 &cpuinfo_max_freq.attr,
833 &cpuinfo_transition_latency.attr,
834 &scaling_min_freq.attr,
835 &scaling_max_freq.attr,
836 &affected_cpus.attr,
837 &related_cpus.attr,
838 &scaling_governor.attr,
839 &scaling_driver.attr,
840 &scaling_available_governors.attr,
841 &scaling_setspeed.attr,
842 NULL
843 };
844
845 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
846 #define to_attr(a) container_of(a, struct freq_attr, attr)
847
848 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
849 {
850 struct cpufreq_policy *policy = to_policy(kobj);
851 struct freq_attr *fattr = to_attr(attr);
852 ssize_t ret;
853
854 if (!down_read_trylock(&cpufreq_rwsem))
855 return -EINVAL;
856
857 down_read(&policy->rwsem);
858
859 if (fattr->show)
860 ret = fattr->show(policy, buf);
861 else
862 ret = -EIO;
863
864 up_read(&policy->rwsem);
865 up_read(&cpufreq_rwsem);
866
867 return ret;
868 }
869
870 static ssize_t store(struct kobject *kobj, struct attribute *attr,
871 const char *buf, size_t count)
872 {
873 struct cpufreq_policy *policy = to_policy(kobj);
874 struct freq_attr *fattr = to_attr(attr);
875 ssize_t ret = -EINVAL;
876
877 get_online_cpus();
878
879 if (!cpu_online(policy->cpu))
880 goto unlock;
881
882 if (!down_read_trylock(&cpufreq_rwsem))
883 goto unlock;
884
885 down_write(&policy->rwsem);
886
887 /* Updating inactive policies is invalid, so avoid doing that. */
888 if (unlikely(policy_is_inactive(policy))) {
889 ret = -EBUSY;
890 goto unlock_policy_rwsem;
891 }
892
893 if (fattr->store)
894 ret = fattr->store(policy, buf, count);
895 else
896 ret = -EIO;
897
898 unlock_policy_rwsem:
899 up_write(&policy->rwsem);
900
901 up_read(&cpufreq_rwsem);
902 unlock:
903 put_online_cpus();
904
905 return ret;
906 }
907
908 static void cpufreq_sysfs_release(struct kobject *kobj)
909 {
910 struct cpufreq_policy *policy = to_policy(kobj);
911 pr_debug("last reference is dropped\n");
912 complete(&policy->kobj_unregister);
913 }
914
915 static const struct sysfs_ops sysfs_ops = {
916 .show = show,
917 .store = store,
918 };
919
920 static struct kobj_type ktype_cpufreq = {
921 .sysfs_ops = &sysfs_ops,
922 .default_attrs = default_attrs,
923 .release = cpufreq_sysfs_release,
924 };
925
926 struct kobject *cpufreq_global_kobject;
927 EXPORT_SYMBOL(cpufreq_global_kobject);
928
929 static int cpufreq_global_kobject_usage;
930
931 int cpufreq_get_global_kobject(void)
932 {
933 if (!cpufreq_global_kobject_usage++)
934 return kobject_add(cpufreq_global_kobject,
935 &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
936
937 return 0;
938 }
939 EXPORT_SYMBOL(cpufreq_get_global_kobject);
940
941 void cpufreq_put_global_kobject(void)
942 {
943 if (!--cpufreq_global_kobject_usage)
944 kobject_del(cpufreq_global_kobject);
945 }
946 EXPORT_SYMBOL(cpufreq_put_global_kobject);
947
948 int cpufreq_sysfs_create_file(const struct attribute *attr)
949 {
950 int ret = cpufreq_get_global_kobject();
951
952 if (!ret) {
953 ret = sysfs_create_file(cpufreq_global_kobject, attr);
954 if (ret)
955 cpufreq_put_global_kobject();
956 }
957
958 return ret;
959 }
960 EXPORT_SYMBOL(cpufreq_sysfs_create_file);
961
962 void cpufreq_sysfs_remove_file(const struct attribute *attr)
963 {
964 sysfs_remove_file(cpufreq_global_kobject, attr);
965 cpufreq_put_global_kobject();
966 }
967 EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
968
969 static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
970 {
971 struct device *cpu_dev;
972
973 pr_debug("%s: Adding symlink for CPU: %u\n", __func__, cpu);
974
975 if (!policy)
976 return 0;
977
978 cpu_dev = get_cpu_device(cpu);
979 if (WARN_ON(!cpu_dev))
980 return 0;
981
982 return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq");
983 }
984
985 static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
986 {
987 struct device *cpu_dev;
988
989 pr_debug("%s: Removing symlink for CPU: %u\n", __func__, cpu);
990
991 cpu_dev = get_cpu_device(cpu);
992 if (WARN_ON(!cpu_dev))
993 return;
994
995 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
996 }
997
998 /* Add/remove symlinks for all related CPUs */
999 static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
1000 {
1001 unsigned int j;
1002 int ret = 0;
1003
1004 /* Some related CPUs might not be present (physically hotplugged) */
1005 for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
1006 if (j == policy->kobj_cpu)
1007 continue;
1008
1009 ret = add_cpu_dev_symlink(policy, j);
1010 if (ret)
1011 break;
1012 }
1013
1014 return ret;
1015 }
1016
1017 static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
1018 {
1019 unsigned int j;
1020
1021 /* Some related CPUs might not be present (physically hotplugged) */
1022 for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
1023 if (j == policy->kobj_cpu)
1024 continue;
1025
1026 remove_cpu_dev_symlink(policy, j);
1027 }
1028 }
1029
1030 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
1031 struct device *dev)
1032 {
1033 struct freq_attr **drv_attr;
1034 int ret = 0;
1035
1036 /* set up files for this cpu device */
1037 drv_attr = cpufreq_driver->attr;
1038 while (drv_attr && *drv_attr) {
1039 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
1040 if (ret)
1041 return ret;
1042 drv_attr++;
1043 }
1044 if (cpufreq_driver->get) {
1045 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
1046 if (ret)
1047 return ret;
1048 }
1049
1050 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
1051 if (ret)
1052 return ret;
1053
1054 if (cpufreq_driver->bios_limit) {
1055 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
1056 if (ret)
1057 return ret;
1058 }
1059
1060 return cpufreq_add_dev_symlink(policy);
1061 }
1062
1063 static void cpufreq_init_policy(struct cpufreq_policy *policy)
1064 {
1065 struct cpufreq_governor *gov = NULL;
1066 struct cpufreq_policy new_policy;
1067 int ret = 0;
1068
1069 memcpy(&new_policy, policy, sizeof(*policy));
1070
1071 /* Update governor of new_policy to the governor used before hotplug */
1072 gov = find_governor(policy->last_governor);
1073 if (gov)
1074 pr_debug("Restoring governor %s for cpu %d\n",
1075 policy->governor->name, policy->cpu);
1076 else
1077 gov = CPUFREQ_DEFAULT_GOVERNOR;
1078
1079 new_policy.governor = gov;
1080
1081 /* Use the default policy if its valid. */
1082 if (cpufreq_driver->setpolicy)
1083 cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
1084
1085 /* set default policy */
1086 ret = cpufreq_set_policy(policy, &new_policy);
1087 if (ret) {
1088 pr_debug("setting policy failed\n");
1089 if (cpufreq_driver->exit)
1090 cpufreq_driver->exit(policy);
1091 }
1092 }
1093
1094 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
1095 unsigned int cpu, struct device *dev)
1096 {
1097 int ret = 0;
1098
1099 /* Has this CPU been taken care of already? */
1100 if (cpumask_test_cpu(cpu, policy->cpus))
1101 return 0;
1102
1103 if (has_target()) {
1104 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1105 if (ret) {
1106 pr_err("%s: Failed to stop governor\n", __func__);
1107 return ret;
1108 }
1109 }
1110
1111 down_write(&policy->rwsem);
1112 cpumask_set_cpu(cpu, policy->cpus);
1113 up_write(&policy->rwsem);
1114
1115 if (has_target()) {
1116 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1117 if (!ret)
1118 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1119
1120 if (ret) {
1121 pr_err("%s: Failed to start governor\n", __func__);
1122 return ret;
1123 }
1124 }
1125
1126 return 0;
1127 }
1128
1129 static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
1130 {
1131 struct cpufreq_policy *policy;
1132 unsigned long flags;
1133
1134 read_lock_irqsave(&cpufreq_driver_lock, flags);
1135 policy = per_cpu(cpufreq_cpu_data, cpu);
1136 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1137
1138 if (likely(policy)) {
1139 /* Policy should be inactive here */
1140 WARN_ON(!policy_is_inactive(policy));
1141
1142 down_write(&policy->rwsem);
1143 policy->cpu = cpu;
1144 policy->governor = NULL;
1145 up_write(&policy->rwsem);
1146 }
1147
1148 return policy;
1149 }
1150
1151 static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
1152 {
1153 struct cpufreq_policy *policy;
1154 int ret;
1155
1156 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1157 if (!policy)
1158 return NULL;
1159
1160 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1161 goto err_free_policy;
1162
1163 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1164 goto err_free_cpumask;
1165
1166 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj,
1167 "cpufreq");
1168 if (ret) {
1169 pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
1170 goto err_free_rcpumask;
1171 }
1172
1173 INIT_LIST_HEAD(&policy->policy_list);
1174 init_rwsem(&policy->rwsem);
1175 spin_lock_init(&policy->transition_lock);
1176 init_waitqueue_head(&policy->transition_wait);
1177 init_completion(&policy->kobj_unregister);
1178 INIT_WORK(&policy->update, handle_update);
1179
1180 policy->cpu = dev->id;
1181
1182 /* Set this once on allocation */
1183 policy->kobj_cpu = dev->id;
1184
1185 return policy;
1186
1187 err_free_rcpumask:
1188 free_cpumask_var(policy->related_cpus);
1189 err_free_cpumask:
1190 free_cpumask_var(policy->cpus);
1191 err_free_policy:
1192 kfree(policy);
1193
1194 return NULL;
1195 }
1196
1197 static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
1198 {
1199 struct kobject *kobj;
1200 struct completion *cmp;
1201
1202 if (notify)
1203 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1204 CPUFREQ_REMOVE_POLICY, policy);
1205
1206 down_write(&policy->rwsem);
1207 cpufreq_remove_dev_symlink(policy);
1208 kobj = &policy->kobj;
1209 cmp = &policy->kobj_unregister;
1210 up_write(&policy->rwsem);
1211 kobject_put(kobj);
1212
1213 /*
1214 * We need to make sure that the underlying kobj is
1215 * actually not referenced anymore by anybody before we
1216 * proceed with unloading.
1217 */
1218 pr_debug("waiting for dropping of refcount\n");
1219 wait_for_completion(cmp);
1220 pr_debug("wait complete\n");
1221 }
1222
1223 static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
1224 {
1225 unsigned long flags;
1226 int cpu;
1227
1228 /* Remove policy from list */
1229 write_lock_irqsave(&cpufreq_driver_lock, flags);
1230 list_del(&policy->policy_list);
1231
1232 for_each_cpu(cpu, policy->related_cpus)
1233 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1234 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1235
1236 cpufreq_policy_put_kobj(policy, notify);
1237 free_cpumask_var(policy->related_cpus);
1238 free_cpumask_var(policy->cpus);
1239 kfree(policy);
1240 }
1241
1242 /**
1243 * cpufreq_add_dev - add a CPU device
1244 *
1245 * Adds the cpufreq interface for a CPU device.
1246 *
1247 * The Oracle says: try running cpufreq registration/unregistration concurrently
1248 * with with cpu hotplugging and all hell will break loose. Tried to clean this
1249 * mess up, but more thorough testing is needed. - Mathieu
1250 */
1251 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1252 {
1253 unsigned int j, cpu = dev->id;
1254 int ret = -ENOMEM;
1255 struct cpufreq_policy *policy;
1256 unsigned long flags;
1257 bool recover_policy = !sif;
1258
1259 pr_debug("adding CPU %u\n", cpu);
1260
1261 /*
1262 * Only possible if 'cpu' wasn't physically present earlier and we are
1263 * here from subsys_interface add callback. A hotplug notifier will
1264 * follow and we will handle it like logical CPU hotplug then. For now,
1265 * just create the sysfs link.
1266 */
1267 if (cpu_is_offline(cpu))
1268 return add_cpu_dev_symlink(per_cpu(cpufreq_cpu_data, cpu), cpu);
1269
1270 if (!down_read_trylock(&cpufreq_rwsem))
1271 return 0;
1272
1273 /* Check if this CPU already has a policy to manage it */
1274 policy = per_cpu(cpufreq_cpu_data, cpu);
1275 if (policy && !policy_is_inactive(policy)) {
1276 WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
1277 ret = cpufreq_add_policy_cpu(policy, cpu, dev);
1278 up_read(&cpufreq_rwsem);
1279 return ret;
1280 }
1281
1282 /*
1283 * Restore the saved policy when doing light-weight init and fall back
1284 * to the full init if that fails.
1285 */
1286 policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
1287 if (!policy) {
1288 recover_policy = false;
1289 policy = cpufreq_policy_alloc(dev);
1290 if (!policy)
1291 goto nomem_out;
1292 }
1293
1294 cpumask_copy(policy->cpus, cpumask_of(cpu));
1295
1296 /* call driver. From then on the cpufreq must be able
1297 * to accept all calls to ->verify and ->setpolicy for this CPU
1298 */
1299 ret = cpufreq_driver->init(policy);
1300 if (ret) {
1301 pr_debug("initialization failed\n");
1302 goto err_set_policy_cpu;
1303 }
1304
1305 down_write(&policy->rwsem);
1306
1307 /* related cpus should atleast have policy->cpus */
1308 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1309
1310 /*
1311 * affected cpus must always be the one, which are online. We aren't
1312 * managing offline cpus here.
1313 */
1314 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1315
1316 if (!recover_policy) {
1317 policy->user_policy.min = policy->min;
1318 policy->user_policy.max = policy->max;
1319
1320 write_lock_irqsave(&cpufreq_driver_lock, flags);
1321 for_each_cpu(j, policy->related_cpus)
1322 per_cpu(cpufreq_cpu_data, j) = policy;
1323 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1324 }
1325
1326 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
1327 policy->cur = cpufreq_driver->get(policy->cpu);
1328 if (!policy->cur) {
1329 pr_err("%s: ->get() failed\n", __func__);
1330 goto err_get_freq;
1331 }
1332 }
1333
1334 /*
1335 * Sometimes boot loaders set CPU frequency to a value outside of
1336 * frequency table present with cpufreq core. In such cases CPU might be
1337 * unstable if it has to run on that frequency for long duration of time
1338 * and so its better to set it to a frequency which is specified in
1339 * freq-table. This also makes cpufreq stats inconsistent as
1340 * cpufreq-stats would fail to register because current frequency of CPU
1341 * isn't found in freq-table.
1342 *
1343 * Because we don't want this change to effect boot process badly, we go
1344 * for the next freq which is >= policy->cur ('cur' must be set by now,
1345 * otherwise we will end up setting freq to lowest of the table as 'cur'
1346 * is initialized to zero).
1347 *
1348 * We are passing target-freq as "policy->cur - 1" otherwise
1349 * __cpufreq_driver_target() would simply fail, as policy->cur will be
1350 * equal to target-freq.
1351 */
1352 if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1353 && has_target()) {
1354 /* Are we running at unknown frequency ? */
1355 ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1356 if (ret == -EINVAL) {
1357 /* Warn user and fix it */
1358 pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1359 __func__, policy->cpu, policy->cur);
1360 ret = __cpufreq_driver_target(policy, policy->cur - 1,
1361 CPUFREQ_RELATION_L);
1362
1363 /*
1364 * Reaching here after boot in a few seconds may not
1365 * mean that system will remain stable at "unknown"
1366 * frequency for longer duration. Hence, a BUG_ON().
1367 */
1368 BUG_ON(ret);
1369 pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1370 __func__, policy->cpu, policy->cur);
1371 }
1372 }
1373
1374 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1375 CPUFREQ_START, policy);
1376
1377 if (!recover_policy) {
1378 ret = cpufreq_add_dev_interface(policy, dev);
1379 if (ret)
1380 goto err_out_unregister;
1381 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1382 CPUFREQ_CREATE_POLICY, policy);
1383
1384 write_lock_irqsave(&cpufreq_driver_lock, flags);
1385 list_add(&policy->policy_list, &cpufreq_policy_list);
1386 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1387 }
1388
1389 cpufreq_init_policy(policy);
1390
1391 if (!recover_policy) {
1392 policy->user_policy.policy = policy->policy;
1393 policy->user_policy.governor = policy->governor;
1394 }
1395 up_write(&policy->rwsem);
1396
1397 kobject_uevent(&policy->kobj, KOBJ_ADD);
1398
1399 up_read(&cpufreq_rwsem);
1400
1401 /* Callback for handling stuff after policy is ready */
1402 if (cpufreq_driver->ready)
1403 cpufreq_driver->ready(policy);
1404
1405 pr_debug("initialization complete\n");
1406
1407 return 0;
1408
1409 err_out_unregister:
1410 err_get_freq:
1411 up_write(&policy->rwsem);
1412
1413 if (cpufreq_driver->exit)
1414 cpufreq_driver->exit(policy);
1415 err_set_policy_cpu:
1416 cpufreq_policy_free(policy, recover_policy);
1417 nomem_out:
1418 up_read(&cpufreq_rwsem);
1419
1420 return ret;
1421 }
1422
1423 static int __cpufreq_remove_dev_prepare(struct device *dev,
1424 struct subsys_interface *sif)
1425 {
1426 unsigned int cpu = dev->id;
1427 int ret = 0;
1428 struct cpufreq_policy *policy;
1429
1430 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1431
1432 policy = cpufreq_cpu_get_raw(cpu);
1433 if (!policy) {
1434 pr_debug("%s: No cpu_data found\n", __func__);
1435 return -EINVAL;
1436 }
1437
1438 if (has_target()) {
1439 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1440 if (ret) {
1441 pr_err("%s: Failed to stop governor\n", __func__);
1442 return ret;
1443 }
1444 }
1445
1446 down_write(&policy->rwsem);
1447 cpumask_clear_cpu(cpu, policy->cpus);
1448
1449 if (policy_is_inactive(policy)) {
1450 if (has_target())
1451 strncpy(policy->last_governor, policy->governor->name,
1452 CPUFREQ_NAME_LEN);
1453 } else if (cpu == policy->cpu) {
1454 /* Nominate new CPU */
1455 policy->cpu = cpumask_any(policy->cpus);
1456 }
1457 up_write(&policy->rwsem);
1458
1459 /* Start governor again for active policy */
1460 if (!policy_is_inactive(policy)) {
1461 if (has_target()) {
1462 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1463 if (!ret)
1464 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1465
1466 if (ret)
1467 pr_err("%s: Failed to start governor\n", __func__);
1468 }
1469 } else if (cpufreq_driver->stop_cpu) {
1470 cpufreq_driver->stop_cpu(policy);
1471 }
1472
1473 return ret;
1474 }
1475
1476 static int __cpufreq_remove_dev_finish(struct device *dev,
1477 struct subsys_interface *sif)
1478 {
1479 unsigned int cpu = dev->id;
1480 int ret;
1481 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1482
1483 if (!policy) {
1484 pr_debug("%s: No cpu_data found\n", __func__);
1485 return -EINVAL;
1486 }
1487
1488 /* Only proceed for inactive policies */
1489 if (!policy_is_inactive(policy))
1490 return 0;
1491
1492 /* If cpu is last user of policy, free policy */
1493 if (has_target()) {
1494 ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
1495 if (ret) {
1496 pr_err("%s: Failed to exit governor\n", __func__);
1497 return ret;
1498 }
1499 }
1500
1501 /*
1502 * Perform the ->exit() even during light-weight tear-down,
1503 * since this is a core component, and is essential for the
1504 * subsequent light-weight ->init() to succeed.
1505 */
1506 if (cpufreq_driver->exit)
1507 cpufreq_driver->exit(policy);
1508
1509 /* Free the policy only if the driver is getting removed. */
1510 if (sif)
1511 cpufreq_policy_free(policy, true);
1512
1513 return 0;
1514 }
1515
1516 /**
1517 * cpufreq_remove_dev - remove a CPU device
1518 *
1519 * Removes the cpufreq interface for a CPU device.
1520 */
1521 static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1522 {
1523 unsigned int cpu = dev->id;
1524 int ret;
1525
1526 /*
1527 * Only possible if 'cpu' is getting physically removed now. A hotplug
1528 * notifier should have already been called and we just need to remove
1529 * link or free policy here.
1530 */
1531 if (cpu_is_offline(cpu)) {
1532 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1533 struct cpumask mask;
1534
1535 if (!policy)
1536 return;
1537
1538 cpumask_copy(&mask, policy->related_cpus);
1539 cpumask_clear_cpu(cpu, &mask);
1540
1541 /*
1542 * Free policy only if all policy->related_cpus are removed
1543 * physically.
1544 */
1545 if (cpumask_intersects(&mask, cpu_present_mask)) {
1546 remove_cpu_dev_symlink(policy, cpu);
1547 return;
1548 }
1549
1550 cpufreq_policy_free(policy, true);
1551 return;
1552 }
1553
1554 ret = __cpufreq_remove_dev_prepare(dev, sif);
1555
1556 if (!ret)
1557 __cpufreq_remove_dev_finish(dev, sif);
1558 }
1559
1560 static void handle_update(struct work_struct *work)
1561 {
1562 struct cpufreq_policy *policy =
1563 container_of(work, struct cpufreq_policy, update);
1564 unsigned int cpu = policy->cpu;
1565 pr_debug("handle_update for cpu %u called\n", cpu);
1566 cpufreq_update_policy(cpu);
1567 }
1568
1569 /**
1570 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1571 * in deep trouble.
1572 * @policy: policy managing CPUs
1573 * @new_freq: CPU frequency the CPU actually runs at
1574 *
1575 * We adjust to current frequency first, and need to clean up later.
1576 * So either call to cpufreq_update_policy() or schedule handle_update()).
1577 */
1578 static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1579 unsigned int new_freq)
1580 {
1581 struct cpufreq_freqs freqs;
1582
1583 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1584 policy->cur, new_freq);
1585
1586 freqs.old = policy->cur;
1587 freqs.new = new_freq;
1588
1589 cpufreq_freq_transition_begin(policy, &freqs);
1590 cpufreq_freq_transition_end(policy, &freqs, 0);
1591 }
1592
1593 /**
1594 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1595 * @cpu: CPU number
1596 *
1597 * This is the last known freq, without actually getting it from the driver.
1598 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1599 */
1600 unsigned int cpufreq_quick_get(unsigned int cpu)
1601 {
1602 struct cpufreq_policy *policy;
1603 unsigned int ret_freq = 0;
1604
1605 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1606 return cpufreq_driver->get(cpu);
1607
1608 policy = cpufreq_cpu_get(cpu);
1609 if (policy) {
1610 ret_freq = policy->cur;
1611 cpufreq_cpu_put(policy);
1612 }
1613
1614 return ret_freq;
1615 }
1616 EXPORT_SYMBOL(cpufreq_quick_get);
1617
1618 /**
1619 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1620 * @cpu: CPU number
1621 *
1622 * Just return the max possible frequency for a given CPU.
1623 */
1624 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1625 {
1626 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1627 unsigned int ret_freq = 0;
1628
1629 if (policy) {
1630 ret_freq = policy->max;
1631 cpufreq_cpu_put(policy);
1632 }
1633
1634 return ret_freq;
1635 }
1636 EXPORT_SYMBOL(cpufreq_quick_get_max);
1637
1638 static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1639 {
1640 unsigned int ret_freq = 0;
1641
1642 if (!cpufreq_driver->get)
1643 return ret_freq;
1644
1645 ret_freq = cpufreq_driver->get(policy->cpu);
1646
1647 /* Updating inactive policies is invalid, so avoid doing that. */
1648 if (unlikely(policy_is_inactive(policy)))
1649 return ret_freq;
1650
1651 if (ret_freq && policy->cur &&
1652 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1653 /* verify no discrepancy between actual and
1654 saved value exists */
1655 if (unlikely(ret_freq != policy->cur)) {
1656 cpufreq_out_of_sync(policy, ret_freq);
1657 schedule_work(&policy->update);
1658 }
1659 }
1660
1661 return ret_freq;
1662 }
1663
1664 /**
1665 * cpufreq_get - get the current CPU frequency (in kHz)
1666 * @cpu: CPU number
1667 *
1668 * Get the CPU current (static) CPU frequency
1669 */
1670 unsigned int cpufreq_get(unsigned int cpu)
1671 {
1672 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1673 unsigned int ret_freq = 0;
1674
1675 if (policy) {
1676 down_read(&policy->rwsem);
1677 ret_freq = __cpufreq_get(policy);
1678 up_read(&policy->rwsem);
1679
1680 cpufreq_cpu_put(policy);
1681 }
1682
1683 return ret_freq;
1684 }
1685 EXPORT_SYMBOL(cpufreq_get);
1686
1687 static struct subsys_interface cpufreq_interface = {
1688 .name = "cpufreq",
1689 .subsys = &cpu_subsys,
1690 .add_dev = cpufreq_add_dev,
1691 .remove_dev = cpufreq_remove_dev,
1692 };
1693
1694 /*
1695 * In case platform wants some specific frequency to be configured
1696 * during suspend..
1697 */
1698 int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1699 {
1700 int ret;
1701
1702 if (!policy->suspend_freq) {
1703 pr_err("%s: suspend_freq can't be zero\n", __func__);
1704 return -EINVAL;
1705 }
1706
1707 pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1708 policy->suspend_freq);
1709
1710 ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1711 CPUFREQ_RELATION_H);
1712 if (ret)
1713 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1714 __func__, policy->suspend_freq, ret);
1715
1716 return ret;
1717 }
1718 EXPORT_SYMBOL(cpufreq_generic_suspend);
1719
1720 /**
1721 * cpufreq_suspend() - Suspend CPUFreq governors
1722 *
1723 * Called during system wide Suspend/Hibernate cycles for suspending governors
1724 * as some platforms can't change frequency after this point in suspend cycle.
1725 * Because some of the devices (like: i2c, regulators, etc) they use for
1726 * changing frequency are suspended quickly after this point.
1727 */
1728 void cpufreq_suspend(void)
1729 {
1730 struct cpufreq_policy *policy;
1731
1732 if (!cpufreq_driver)
1733 return;
1734
1735 if (!has_target())
1736 goto suspend;
1737
1738 pr_debug("%s: Suspending Governors\n", __func__);
1739
1740 for_each_active_policy(policy) {
1741 if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1742 pr_err("%s: Failed to stop governor for policy: %p\n",
1743 __func__, policy);
1744 else if (cpufreq_driver->suspend
1745 && cpufreq_driver->suspend(policy))
1746 pr_err("%s: Failed to suspend driver: %p\n", __func__,
1747 policy);
1748 }
1749
1750 suspend:
1751 cpufreq_suspended = true;
1752 }
1753
1754 /**
1755 * cpufreq_resume() - Resume CPUFreq governors
1756 *
1757 * Called during system wide Suspend/Hibernate cycle for resuming governors that
1758 * are suspended with cpufreq_suspend().
1759 */
1760 void cpufreq_resume(void)
1761 {
1762 struct cpufreq_policy *policy;
1763
1764 if (!cpufreq_driver)
1765 return;
1766
1767 cpufreq_suspended = false;
1768
1769 if (!has_target())
1770 return;
1771
1772 pr_debug("%s: Resuming Governors\n", __func__);
1773
1774 for_each_active_policy(policy) {
1775 if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
1776 pr_err("%s: Failed to resume driver: %p\n", __func__,
1777 policy);
1778 else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
1779 || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
1780 pr_err("%s: Failed to start governor for policy: %p\n",
1781 __func__, policy);
1782 }
1783
1784 /*
1785 * schedule call cpufreq_update_policy() for first-online CPU, as that
1786 * wouldn't be hotplugged-out on suspend. It will verify that the
1787 * current freq is in sync with what we believe it to be.
1788 */
1789 policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
1790 if (WARN_ON(!policy))
1791 return;
1792
1793 schedule_work(&policy->update);
1794 }
1795
1796 /**
1797 * cpufreq_get_current_driver - return current driver's name
1798 *
1799 * Return the name string of the currently loaded cpufreq driver
1800 * or NULL, if none.
1801 */
1802 const char *cpufreq_get_current_driver(void)
1803 {
1804 if (cpufreq_driver)
1805 return cpufreq_driver->name;
1806
1807 return NULL;
1808 }
1809 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1810
1811 /**
1812 * cpufreq_get_driver_data - return current driver data
1813 *
1814 * Return the private data of the currently loaded cpufreq
1815 * driver, or NULL if no cpufreq driver is loaded.
1816 */
1817 void *cpufreq_get_driver_data(void)
1818 {
1819 if (cpufreq_driver)
1820 return cpufreq_driver->driver_data;
1821
1822 return NULL;
1823 }
1824 EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1825
1826 /*********************************************************************
1827 * NOTIFIER LISTS INTERFACE *
1828 *********************************************************************/
1829
1830 /**
1831 * cpufreq_register_notifier - register a driver with cpufreq
1832 * @nb: notifier function to register
1833 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1834 *
1835 * Add a driver to one of two lists: either a list of drivers that
1836 * are notified about clock rate changes (once before and once after
1837 * the transition), or a list of drivers that are notified about
1838 * changes in cpufreq policy.
1839 *
1840 * This function may sleep, and has the same return conditions as
1841 * blocking_notifier_chain_register.
1842 */
1843 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1844 {
1845 int ret;
1846
1847 if (cpufreq_disabled())
1848 return -EINVAL;
1849
1850 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1851
1852 switch (list) {
1853 case CPUFREQ_TRANSITION_NOTIFIER:
1854 ret = srcu_notifier_chain_register(
1855 &cpufreq_transition_notifier_list, nb);
1856 break;
1857 case CPUFREQ_POLICY_NOTIFIER:
1858 ret = blocking_notifier_chain_register(
1859 &cpufreq_policy_notifier_list, nb);
1860 break;
1861 default:
1862 ret = -EINVAL;
1863 }
1864
1865 return ret;
1866 }
1867 EXPORT_SYMBOL(cpufreq_register_notifier);
1868
1869 /**
1870 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1871 * @nb: notifier block to be unregistered
1872 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1873 *
1874 * Remove a driver from the CPU frequency notifier list.
1875 *
1876 * This function may sleep, and has the same return conditions as
1877 * blocking_notifier_chain_unregister.
1878 */
1879 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1880 {
1881 int ret;
1882
1883 if (cpufreq_disabled())
1884 return -EINVAL;
1885
1886 switch (list) {
1887 case CPUFREQ_TRANSITION_NOTIFIER:
1888 ret = srcu_notifier_chain_unregister(
1889 &cpufreq_transition_notifier_list, nb);
1890 break;
1891 case CPUFREQ_POLICY_NOTIFIER:
1892 ret = blocking_notifier_chain_unregister(
1893 &cpufreq_policy_notifier_list, nb);
1894 break;
1895 default:
1896 ret = -EINVAL;
1897 }
1898
1899 return ret;
1900 }
1901 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1902
1903
1904 /*********************************************************************
1905 * GOVERNORS *
1906 *********************************************************************/
1907
1908 /* Must set freqs->new to intermediate frequency */
1909 static int __target_intermediate(struct cpufreq_policy *policy,
1910 struct cpufreq_freqs *freqs, int index)
1911 {
1912 int ret;
1913
1914 freqs->new = cpufreq_driver->get_intermediate(policy, index);
1915
1916 /* We don't need to switch to intermediate freq */
1917 if (!freqs->new)
1918 return 0;
1919
1920 pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
1921 __func__, policy->cpu, freqs->old, freqs->new);
1922
1923 cpufreq_freq_transition_begin(policy, freqs);
1924 ret = cpufreq_driver->target_intermediate(policy, index);
1925 cpufreq_freq_transition_end(policy, freqs, ret);
1926
1927 if (ret)
1928 pr_err("%s: Failed to change to intermediate frequency: %d\n",
1929 __func__, ret);
1930
1931 return ret;
1932 }
1933
1934 static int __target_index(struct cpufreq_policy *policy,
1935 struct cpufreq_frequency_table *freq_table, int index)
1936 {
1937 struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
1938 unsigned int intermediate_freq = 0;
1939 int retval = -EINVAL;
1940 bool notify;
1941
1942 notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
1943 if (notify) {
1944 /* Handle switching to intermediate frequency */
1945 if (cpufreq_driver->get_intermediate) {
1946 retval = __target_intermediate(policy, &freqs, index);
1947 if (retval)
1948 return retval;
1949
1950 intermediate_freq = freqs.new;
1951 /* Set old freq to intermediate */
1952 if (intermediate_freq)
1953 freqs.old = freqs.new;
1954 }
1955
1956 freqs.new = freq_table[index].frequency;
1957 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1958 __func__, policy->cpu, freqs.old, freqs.new);
1959
1960 cpufreq_freq_transition_begin(policy, &freqs);
1961 }
1962
1963 retval = cpufreq_driver->target_index(policy, index);
1964 if (retval)
1965 pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
1966 retval);
1967
1968 if (notify) {
1969 cpufreq_freq_transition_end(policy, &freqs, retval);
1970
1971 /*
1972 * Failed after setting to intermediate freq? Driver should have
1973 * reverted back to initial frequency and so should we. Check
1974 * here for intermediate_freq instead of get_intermediate, in
1975 * case we haven't switched to intermediate freq at all.
1976 */
1977 if (unlikely(retval && intermediate_freq)) {
1978 freqs.old = intermediate_freq;
1979 freqs.new = policy->restore_freq;
1980 cpufreq_freq_transition_begin(policy, &freqs);
1981 cpufreq_freq_transition_end(policy, &freqs, 0);
1982 }
1983 }
1984
1985 return retval;
1986 }
1987
1988 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1989 unsigned int target_freq,
1990 unsigned int relation)
1991 {
1992 unsigned int old_target_freq = target_freq;
1993 int retval = -EINVAL;
1994
1995 if (cpufreq_disabled())
1996 return -ENODEV;
1997
1998 /* Make sure that target_freq is within supported range */
1999 if (target_freq > policy->max)
2000 target_freq = policy->max;
2001 if (target_freq < policy->min)
2002 target_freq = policy->min;
2003
2004 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
2005 policy->cpu, target_freq, relation, old_target_freq);
2006
2007 /*
2008 * This might look like a redundant call as we are checking it again
2009 * after finding index. But it is left intentionally for cases where
2010 * exactly same freq is called again and so we can save on few function
2011 * calls.
2012 */
2013 if (target_freq == policy->cur)
2014 return 0;
2015
2016 /* Save last value to restore later on errors */
2017 policy->restore_freq = policy->cur;
2018
2019 if (cpufreq_driver->target)
2020 retval = cpufreq_driver->target(policy, target_freq, relation);
2021 else if (cpufreq_driver->target_index) {
2022 struct cpufreq_frequency_table *freq_table;
2023 int index;
2024
2025 freq_table = cpufreq_frequency_get_table(policy->cpu);
2026 if (unlikely(!freq_table)) {
2027 pr_err("%s: Unable to find freq_table\n", __func__);
2028 goto out;
2029 }
2030
2031 retval = cpufreq_frequency_table_target(policy, freq_table,
2032 target_freq, relation, &index);
2033 if (unlikely(retval)) {
2034 pr_err("%s: Unable to find matching freq\n", __func__);
2035 goto out;
2036 }
2037
2038 if (freq_table[index].frequency == policy->cur) {
2039 retval = 0;
2040 goto out;
2041 }
2042
2043 retval = __target_index(policy, freq_table, index);
2044 }
2045
2046 out:
2047 return retval;
2048 }
2049 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
2050
2051 int cpufreq_driver_target(struct cpufreq_policy *policy,
2052 unsigned int target_freq,
2053 unsigned int relation)
2054 {
2055 int ret = -EINVAL;
2056
2057 down_write(&policy->rwsem);
2058
2059 ret = __cpufreq_driver_target(policy, target_freq, relation);
2060
2061 up_write(&policy->rwsem);
2062
2063 return ret;
2064 }
2065 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
2066
2067 static int __cpufreq_governor(struct cpufreq_policy *policy,
2068 unsigned int event)
2069 {
2070 int ret;
2071
2072 /* Only must be defined when default governor is known to have latency
2073 restrictions, like e.g. conservative or ondemand.
2074 That this is the case is already ensured in Kconfig
2075 */
2076 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
2077 struct cpufreq_governor *gov = &cpufreq_gov_performance;
2078 #else
2079 struct cpufreq_governor *gov = NULL;
2080 #endif
2081
2082 /* Don't start any governor operations if we are entering suspend */
2083 if (cpufreq_suspended)
2084 return 0;
2085 /*
2086 * Governor might not be initiated here if ACPI _PPC changed
2087 * notification happened, so check it.
2088 */
2089 if (!policy->governor)
2090 return -EINVAL;
2091
2092 if (policy->governor->max_transition_latency &&
2093 policy->cpuinfo.transition_latency >
2094 policy->governor->max_transition_latency) {
2095 if (!gov)
2096 return -EINVAL;
2097 else {
2098 pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
2099 policy->governor->name, gov->name);
2100 policy->governor = gov;
2101 }
2102 }
2103
2104 if (event == CPUFREQ_GOV_POLICY_INIT)
2105 if (!try_module_get(policy->governor->owner))
2106 return -EINVAL;
2107
2108 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
2109 policy->cpu, event);
2110
2111 mutex_lock(&cpufreq_governor_lock);
2112 if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
2113 || (!policy->governor_enabled
2114 && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
2115 mutex_unlock(&cpufreq_governor_lock);
2116 return -EBUSY;
2117 }
2118
2119 if (event == CPUFREQ_GOV_STOP)
2120 policy->governor_enabled = false;
2121 else if (event == CPUFREQ_GOV_START)
2122 policy->governor_enabled = true;
2123
2124 mutex_unlock(&cpufreq_governor_lock);
2125
2126 ret = policy->governor->governor(policy, event);
2127
2128 if (!ret) {
2129 if (event == CPUFREQ_GOV_POLICY_INIT)
2130 policy->governor->initialized++;
2131 else if (event == CPUFREQ_GOV_POLICY_EXIT)
2132 policy->governor->initialized--;
2133 } else {
2134 /* Restore original values */
2135 mutex_lock(&cpufreq_governor_lock);
2136 if (event == CPUFREQ_GOV_STOP)
2137 policy->governor_enabled = true;
2138 else if (event == CPUFREQ_GOV_START)
2139 policy->governor_enabled = false;
2140 mutex_unlock(&cpufreq_governor_lock);
2141 }
2142
2143 if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
2144 ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
2145 module_put(policy->governor->owner);
2146
2147 return ret;
2148 }
2149
2150 int cpufreq_register_governor(struct cpufreq_governor *governor)
2151 {
2152 int err;
2153
2154 if (!governor)
2155 return -EINVAL;
2156
2157 if (cpufreq_disabled())
2158 return -ENODEV;
2159
2160 mutex_lock(&cpufreq_governor_mutex);
2161
2162 governor->initialized = 0;
2163 err = -EBUSY;
2164 if (!find_governor(governor->name)) {
2165 err = 0;
2166 list_add(&governor->governor_list, &cpufreq_governor_list);
2167 }
2168
2169 mutex_unlock(&cpufreq_governor_mutex);
2170 return err;
2171 }
2172 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
2173
2174 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2175 {
2176 struct cpufreq_policy *policy;
2177 unsigned long flags;
2178
2179 if (!governor)
2180 return;
2181
2182 if (cpufreq_disabled())
2183 return;
2184
2185 /* clear last_governor for all inactive policies */
2186 read_lock_irqsave(&cpufreq_driver_lock, flags);
2187 for_each_inactive_policy(policy) {
2188 if (!strcmp(policy->last_governor, governor->name)) {
2189 policy->governor = NULL;
2190 strcpy(policy->last_governor, "\0");
2191 }
2192 }
2193 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
2194
2195 mutex_lock(&cpufreq_governor_mutex);
2196 list_del(&governor->governor_list);
2197 mutex_unlock(&cpufreq_governor_mutex);
2198 return;
2199 }
2200 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2201
2202
2203 /*********************************************************************
2204 * POLICY INTERFACE *
2205 *********************************************************************/
2206
2207 /**
2208 * cpufreq_get_policy - get the current cpufreq_policy
2209 * @policy: struct cpufreq_policy into which the current cpufreq_policy
2210 * is written
2211 *
2212 * Reads the current cpufreq policy.
2213 */
2214 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2215 {
2216 struct cpufreq_policy *cpu_policy;
2217 if (!policy)
2218 return -EINVAL;
2219
2220 cpu_policy = cpufreq_cpu_get(cpu);
2221 if (!cpu_policy)
2222 return -EINVAL;
2223
2224 memcpy(policy, cpu_policy, sizeof(*policy));
2225
2226 cpufreq_cpu_put(cpu_policy);
2227 return 0;
2228 }
2229 EXPORT_SYMBOL(cpufreq_get_policy);
2230
2231 /*
2232 * policy : current policy.
2233 * new_policy: policy to be set.
2234 */
2235 static int cpufreq_set_policy(struct cpufreq_policy *policy,
2236 struct cpufreq_policy *new_policy)
2237 {
2238 struct cpufreq_governor *old_gov;
2239 int ret;
2240
2241 pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2242 new_policy->cpu, new_policy->min, new_policy->max);
2243
2244 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2245
2246 if (new_policy->min > policy->max || new_policy->max < policy->min)
2247 return -EINVAL;
2248
2249 /* verify the cpu speed can be set within this limit */
2250 ret = cpufreq_driver->verify(new_policy);
2251 if (ret)
2252 return ret;
2253
2254 /* adjust if necessary - all reasons */
2255 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2256 CPUFREQ_ADJUST, new_policy);
2257
2258 /* adjust if necessary - hardware incompatibility*/
2259 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2260 CPUFREQ_INCOMPATIBLE, new_policy);
2261
2262 /*
2263 * verify the cpu speed can be set within this limit, which might be
2264 * different to the first one
2265 */
2266 ret = cpufreq_driver->verify(new_policy);
2267 if (ret)
2268 return ret;
2269
2270 /* notification of the new policy */
2271 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2272 CPUFREQ_NOTIFY, new_policy);
2273
2274 policy->min = new_policy->min;
2275 policy->max = new_policy->max;
2276
2277 pr_debug("new min and max freqs are %u - %u kHz\n",
2278 policy->min, policy->max);
2279
2280 if (cpufreq_driver->setpolicy) {
2281 policy->policy = new_policy->policy;
2282 pr_debug("setting range\n");
2283 return cpufreq_driver->setpolicy(new_policy);
2284 }
2285
2286 if (new_policy->governor == policy->governor)
2287 goto out;
2288
2289 pr_debug("governor switch\n");
2290
2291 /* save old, working values */
2292 old_gov = policy->governor;
2293 /* end old governor */
2294 if (old_gov) {
2295 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2296 up_write(&policy->rwsem);
2297 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2298 down_write(&policy->rwsem);
2299 }
2300
2301 /* start new governor */
2302 policy->governor = new_policy->governor;
2303 if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
2304 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
2305 goto out;
2306
2307 up_write(&policy->rwsem);
2308 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2309 down_write(&policy->rwsem);
2310 }
2311
2312 /* new governor failed, so re-start old one */
2313 pr_debug("starting governor %s failed\n", policy->governor->name);
2314 if (old_gov) {
2315 policy->governor = old_gov;
2316 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2317 __cpufreq_governor(policy, CPUFREQ_GOV_START);
2318 }
2319
2320 return -EINVAL;
2321
2322 out:
2323 pr_debug("governor: change or update limits\n");
2324 return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2325 }
2326
2327 /**
2328 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2329 * @cpu: CPU which shall be re-evaluated
2330 *
2331 * Useful for policy notifiers which have different necessities
2332 * at different times.
2333 */
2334 int cpufreq_update_policy(unsigned int cpu)
2335 {
2336 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2337 struct cpufreq_policy new_policy;
2338 int ret;
2339
2340 if (!policy)
2341 return -ENODEV;
2342
2343 down_write(&policy->rwsem);
2344
2345 pr_debug("updating policy for CPU %u\n", cpu);
2346 memcpy(&new_policy, policy, sizeof(*policy));
2347 new_policy.min = policy->user_policy.min;
2348 new_policy.max = policy->user_policy.max;
2349 new_policy.policy = policy->user_policy.policy;
2350 new_policy.governor = policy->user_policy.governor;
2351
2352 /*
2353 * BIOS might change freq behind our back
2354 * -> ask driver for current freq and notify governors about a change
2355 */
2356 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
2357 new_policy.cur = cpufreq_driver->get(cpu);
2358 if (WARN_ON(!new_policy.cur)) {
2359 ret = -EIO;
2360 goto unlock;
2361 }
2362
2363 if (!policy->cur) {
2364 pr_debug("Driver did not initialize current freq\n");
2365 policy->cur = new_policy.cur;
2366 } else {
2367 if (policy->cur != new_policy.cur && has_target())
2368 cpufreq_out_of_sync(policy, new_policy.cur);
2369 }
2370 }
2371
2372 ret = cpufreq_set_policy(policy, &new_policy);
2373
2374 unlock:
2375 up_write(&policy->rwsem);
2376
2377 cpufreq_cpu_put(policy);
2378 return ret;
2379 }
2380 EXPORT_SYMBOL(cpufreq_update_policy);
2381
2382 static int cpufreq_cpu_callback(struct notifier_block *nfb,
2383 unsigned long action, void *hcpu)
2384 {
2385 unsigned int cpu = (unsigned long)hcpu;
2386 struct device *dev;
2387
2388 dev = get_cpu_device(cpu);
2389 if (dev) {
2390 switch (action & ~CPU_TASKS_FROZEN) {
2391 case CPU_ONLINE:
2392 cpufreq_add_dev(dev, NULL);
2393 break;
2394
2395 case CPU_DOWN_PREPARE:
2396 __cpufreq_remove_dev_prepare(dev, NULL);
2397 break;
2398
2399 case CPU_POST_DEAD:
2400 __cpufreq_remove_dev_finish(dev, NULL);
2401 break;
2402
2403 case CPU_DOWN_FAILED:
2404 cpufreq_add_dev(dev, NULL);
2405 break;
2406 }
2407 }
2408 return NOTIFY_OK;
2409 }
2410
2411 static struct notifier_block __refdata cpufreq_cpu_notifier = {
2412 .notifier_call = cpufreq_cpu_callback,
2413 };
2414
2415 /*********************************************************************
2416 * BOOST *
2417 *********************************************************************/
2418 static int cpufreq_boost_set_sw(int state)
2419 {
2420 struct cpufreq_frequency_table *freq_table;
2421 struct cpufreq_policy *policy;
2422 int ret = -EINVAL;
2423
2424 for_each_active_policy(policy) {
2425 freq_table = cpufreq_frequency_get_table(policy->cpu);
2426 if (freq_table) {
2427 ret = cpufreq_frequency_table_cpuinfo(policy,
2428 freq_table);
2429 if (ret) {
2430 pr_err("%s: Policy frequency update failed\n",
2431 __func__);
2432 break;
2433 }
2434 policy->user_policy.max = policy->max;
2435 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2436 }
2437 }
2438
2439 return ret;
2440 }
2441
2442 int cpufreq_boost_trigger_state(int state)
2443 {
2444 unsigned long flags;
2445 int ret = 0;
2446
2447 if (cpufreq_driver->boost_enabled == state)
2448 return 0;
2449
2450 write_lock_irqsave(&cpufreq_driver_lock, flags);
2451 cpufreq_driver->boost_enabled = state;
2452 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2453
2454 ret = cpufreq_driver->set_boost(state);
2455 if (ret) {
2456 write_lock_irqsave(&cpufreq_driver_lock, flags);
2457 cpufreq_driver->boost_enabled = !state;
2458 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2459
2460 pr_err("%s: Cannot %s BOOST\n",
2461 __func__, state ? "enable" : "disable");
2462 }
2463
2464 return ret;
2465 }
2466
2467 int cpufreq_boost_supported(void)
2468 {
2469 if (likely(cpufreq_driver))
2470 return cpufreq_driver->boost_supported;
2471
2472 return 0;
2473 }
2474 EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
2475
2476 int cpufreq_boost_enabled(void)
2477 {
2478 return cpufreq_driver->boost_enabled;
2479 }
2480 EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2481
2482 /*********************************************************************
2483 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2484 *********************************************************************/
2485
2486 /**
2487 * cpufreq_register_driver - register a CPU Frequency driver
2488 * @driver_data: A struct cpufreq_driver containing the values#
2489 * submitted by the CPU Frequency driver.
2490 *
2491 * Registers a CPU Frequency driver to this core code. This code
2492 * returns zero on success, -EBUSY when another driver got here first
2493 * (and isn't unregistered in the meantime).
2494 *
2495 */
2496 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2497 {
2498 unsigned long flags;
2499 int ret;
2500
2501 if (cpufreq_disabled())
2502 return -ENODEV;
2503
2504 if (!driver_data || !driver_data->verify || !driver_data->init ||
2505 !(driver_data->setpolicy || driver_data->target_index ||
2506 driver_data->target) ||
2507 (driver_data->setpolicy && (driver_data->target_index ||
2508 driver_data->target)) ||
2509 (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
2510 return -EINVAL;
2511
2512 pr_debug("trying to register driver %s\n", driver_data->name);
2513
2514 write_lock_irqsave(&cpufreq_driver_lock, flags);
2515 if (cpufreq_driver) {
2516 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2517 return -EEXIST;
2518 }
2519 cpufreq_driver = driver_data;
2520 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2521
2522 if (driver_data->setpolicy)
2523 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2524
2525 if (cpufreq_boost_supported()) {
2526 /*
2527 * Check if driver provides function to enable boost -
2528 * if not, use cpufreq_boost_set_sw as default
2529 */
2530 if (!cpufreq_driver->set_boost)
2531 cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2532
2533 ret = cpufreq_sysfs_create_file(&boost.attr);
2534 if (ret) {
2535 pr_err("%s: cannot register global BOOST sysfs file\n",
2536 __func__);
2537 goto err_null_driver;
2538 }
2539 }
2540
2541 ret = subsys_interface_register(&cpufreq_interface);
2542 if (ret)
2543 goto err_boost_unreg;
2544
2545 if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
2546 list_empty(&cpufreq_policy_list)) {
2547 /* if all ->init() calls failed, unregister */
2548 pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2549 driver_data->name);
2550 goto err_if_unreg;
2551 }
2552
2553 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2554 pr_debug("driver %s up and running\n", driver_data->name);
2555
2556 return 0;
2557 err_if_unreg:
2558 subsys_interface_unregister(&cpufreq_interface);
2559 err_boost_unreg:
2560 if (cpufreq_boost_supported())
2561 cpufreq_sysfs_remove_file(&boost.attr);
2562 err_null_driver:
2563 write_lock_irqsave(&cpufreq_driver_lock, flags);
2564 cpufreq_driver = NULL;
2565 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2566 return ret;
2567 }
2568 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2569
2570 /**
2571 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2572 *
2573 * Unregister the current CPUFreq driver. Only call this if you have
2574 * the right to do so, i.e. if you have succeeded in initialising before!
2575 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2576 * currently not initialised.
2577 */
2578 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2579 {
2580 unsigned long flags;
2581
2582 if (!cpufreq_driver || (driver != cpufreq_driver))
2583 return -EINVAL;
2584
2585 pr_debug("unregistering driver %s\n", driver->name);
2586
2587 subsys_interface_unregister(&cpufreq_interface);
2588 if (cpufreq_boost_supported())
2589 cpufreq_sysfs_remove_file(&boost.attr);
2590
2591 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2592
2593 down_write(&cpufreq_rwsem);
2594 write_lock_irqsave(&cpufreq_driver_lock, flags);
2595
2596 cpufreq_driver = NULL;
2597
2598 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2599 up_write(&cpufreq_rwsem);
2600
2601 return 0;
2602 }
2603 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2604
2605 /*
2606 * Stop cpufreq at shutdown to make sure it isn't holding any locks
2607 * or mutexes when secondary CPUs are halted.
2608 */
2609 static struct syscore_ops cpufreq_syscore_ops = {
2610 .shutdown = cpufreq_suspend,
2611 };
2612
2613 static int __init cpufreq_core_init(void)
2614 {
2615 if (cpufreq_disabled())
2616 return -ENODEV;
2617
2618 cpufreq_global_kobject = kobject_create();
2619 BUG_ON(!cpufreq_global_kobject);
2620
2621 register_syscore_ops(&cpufreq_syscore_ops);
2622
2623 return 0;
2624 }
2625 core_initcall(cpufreq_core_init);
This page took 0.090493 seconds and 5 git commands to generate.