/*
 * drivers/cpufreq/cpufreq_ondemand.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                    Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/cpufreq.h>
#include <linux/sysctl.h>
#include <linux/types.h>
#include <linux/sysfs.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
/*
 * dbs is used in this file as a short form for demand-based switching.
 * It helps to keep variable names smaller and simpler.
 */
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)
/*
 * The polling frequency of this governor depends on the capability of
 * the processor. Default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10 ms, using an appropriate sampling rate.
 *
 * For CPUs with transition latency > 10 ms (mostly drivers with CPUFREQ_ETERNAL)
 * this governor will not work.
 * All times here are in us.
 */
static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE_RATIO			(2)
/* for correct statistics, we need at least 10 ticks between each measure */
#define MIN_STAT_SAMPLING_RATE			(MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
#define MIN_SAMPLING_RATE			(def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
#define MAX_SAMPLING_RATE			(500 * def_sampling_rate)
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	(1000)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000)
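/*
 * Example (assuming HZ=250, i.e. a 4 ms tick): jiffies_to_usecs(10) is
 * 40000 us, so MIN_STAT_SAMPLING_RATE is 80000 us (80 ms). MIN_SAMPLING_RATE
 * and MAX_SAMPLING_RATE then bound the user-visible sampling_rate tunable to
 * [def_sampling_rate / 2, 500 * def_sampling_rate].
 */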
static void do_dbs_timer(void *data);
struct cpu_dbs_info_s {
	cputime64_t prev_cpu_idle;
	cputime64_t prev_cpu_wall;
	struct cpufreq_policy *cur_policy;
	struct work_struct work;
	unsigned int enable;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);

static unsigned int dbs_enable;	/* number of CPUs using this policy */
/*
 * DEADLOCK ALERT! There is an ordering requirement between cpu_hotplug
 * lock and dbs_mutex. cpu_hotplug lock should always be held before
 * dbs_mutex. If any function that can potentially take cpu_hotplug lock
 * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
 * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
 * is recursive for the same process. -Venki
 */
static DEFINE_MUTEX(dbs_mutex);
static DECLARE_WORK(dbs_work, do_dbs_timer, NULL);

static struct workqueue_struct *kondemand_wq;
struct dbs_tuners {
	unsigned int sampling_rate;
	unsigned int up_threshold;
	unsigned int ignore_nice;
};

static struct dbs_tuners dbs_tuners_ins = {
	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
};
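/*
 * Idle time for a CPU, as seen by this governor: the idle and iowait
 * counters from kernel_stat, plus nice time when ignore_nice is set
 * (nice load is then treated as idle rather than as busy time).
 */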
static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
{
	cputime64_t retval;

	retval = cputime64_add(kstat_cpu(cpu).cpustat.idle,
			kstat_cpu(cpu).cpustat.iowait);

	if (dbs_tuners_ins.ignore_nice)
		retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice);

	return retval;
}
/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", MAX_SAMPLING_RATE);
}

static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", MIN_SAMPLING_RATE);
}
#define define_one_ro(_name)					\
static struct freq_attr _name =					\
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);
/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object)					\
static ssize_t show_##file_name						\
(struct cpufreq_policy *unused, char *buf)				\
{									\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
}
show_one(sampling_rate, sampling_rate);
show_one(up_threshold, up_threshold);
show_one(ignore_nice_load, ignore_nice);
static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);

	mutex_lock(&dbs_mutex);
	if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) {
		mutex_unlock(&dbs_mutex);
		return -EINVAL;
	}

	dbs_tuners_ins.sampling_rate = input;
	mutex_unlock(&dbs_mutex);

	return count;
}
static ssize_t store_up_threshold(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);

	mutex_lock(&dbs_mutex);
	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		mutex_unlock(&dbs_mutex);
		return -EINVAL;
	}

	dbs_tuners_ins.up_threshold = input;
	mutex_unlock(&dbs_mutex);

	return count;
}
static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	mutex_lock(&dbs_mutex);
	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
		mutex_unlock(&dbs_mutex);
		return count;
	}
	dbs_tuners_ins.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(cpu_dbs_info, j);
		dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
		dbs_info->prev_cpu_wall = get_jiffies_64();
	}
	mutex_unlock(&dbs_mutex);

	return count;
}
#define define_one_rw(_name)					\
static struct freq_attr _name =					\
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(sampling_rate);
define_one_rw(up_threshold);
define_one_rw(ignore_nice_load);
static struct attribute *dbs_attributes[] = {
	&sampling_rate_max.attr,
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&up_threshold.attr,
	&ignore_nice_load.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "ondemand",
};
/************************** sysfs end ************************/
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
	unsigned int idle_ticks, total_ticks;
	unsigned int load;
	cputime64_t cur_jiffies;

	struct cpufreq_policy *policy;
	unsigned int j;

	if (!this_dbs_info->enable)
		return;

	policy = this_dbs_info->cur_policy;
	cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
	total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
			this_dbs_info->prev_cpu_wall);
	this_dbs_info->prev_cpu_wall = cur_jiffies;
	/*
	 * Every sampling_rate, we check if the current idle time is less
	 * than 20% (default); if so, we try to increase frequency.
	 * Every sampling_rate, we also look for the lowest
	 * frequency which can sustain the load while keeping idle time over
	 * 30%. If such a frequency exists, we try to decrease to this frequency.
	 *
	 * Any frequency increase takes it to the maximum frequency.
	 * Frequency reduction happens at minimum steps of
	 * 5% (default) of current frequency.
	 */
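	/*
	 * Illustration with the default up_threshold of 80: a sampled load of
	 * 85% jumps straight to policy->max, while a load of 40% falls through
	 * to the decrease path, which picks the lowest frequency that can
	 * still carry the load, e.g. freq_next = cur * 40 / (80 - 10), about
	 * 57% of the current frequency.
	 */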
	idle_ticks = UINT_MAX;
	for_each_cpu_mask(j, policy->cpus) {
		cputime64_t total_idle_ticks;
		unsigned int tmp_idle_ticks;
		struct cpu_dbs_info_s *j_dbs_info;

		j_dbs_info = &per_cpu(cpu_dbs_info, j);
		total_idle_ticks = get_cpu_idle_time(j);
		tmp_idle_ticks = (unsigned int) cputime64_sub(total_idle_ticks,
				j_dbs_info->prev_cpu_idle);
		j_dbs_info->prev_cpu_idle = total_idle_ticks;

		if (tmp_idle_ticks < idle_ticks)
			idle_ticks = tmp_idle_ticks;
	}
	load = (100 * (total_ticks - idle_ticks)) / total_ticks;
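	/*
	 * load is the percentage of wall time the busiest CPU in this policy
	 * spent non-idle over the last sample, e.g. total_ticks = 100 and
	 * idle_ticks = 30 gives load = 70. Using the minimum idle_ticks
	 * across policy->cpus means the most loaded CPU decides.
	 */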
	/* Check for frequency increase */
	if (load > dbs_tuners_ins.up_threshold) {
		/* if we are already at full speed then break out early */
		if (policy->cur == policy->max)
			return;

		__cpufreq_driver_target(policy, policy->max,
					CPUFREQ_RELATION_H);
		return;
	}

	/* Check for frequency decrease */
	/* if we cannot reduce the frequency anymore, break out early */
	if (policy->cur == policy->min)
		return;

	/*
	 * The optimal frequency is the lowest frequency that can support
	 * the current CPU usage without triggering the up policy. To be
	 * safe, we focus 10 points under the threshold.
	 */
	if (load < (dbs_tuners_ins.up_threshold - 10)) {
		unsigned int freq_next;
		freq_next = (policy->cur * load) /
				(dbs_tuners_ins.up_threshold - 10);

		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
	}
}
static void do_dbs_timer(void *data)
{
	unsigned int cpu = smp_processor_id();
	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);

	dbs_check_cpu(dbs_info);
	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
}
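/*
 * The work item queued here re-queues itself from do_dbs_timer(), so one
 * dbs_check_cpu() pass runs on the target CPU every sampling_rate
 * microseconds until dbs_timer_exit() cancels it.
 */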
static inline void dbs_timer_init(unsigned int cpu)
{
	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);

	INIT_WORK(&dbs_info->work, do_dbs_timer, 0);
	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
}
static inline void dbs_timer_exit(unsigned int cpu)
{
	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);

	cancel_rearming_delayed_workqueue(kondemand_wq, &dbs_info->work);
}
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int j;

	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) ||
		    (!policy->cur))
			return -EINVAL;

		if (policy->cpuinfo.transition_latency >
				(TRANSITION_LATENCY_LIMIT * 1000)) {
			printk(KERN_WARNING "ondemand governor failed to load "
			       "due to too long transition latency\n");
			return -EINVAL;
		}
		if (this_dbs_info->enable) /* Already enabled */
			break;

		mutex_lock(&dbs_mutex);
		dbs_enable++;
		if (dbs_enable == 1) {
			kondemand_wq = create_workqueue("kondemand");
			if (!kondemand_wq) {
				printk(KERN_ERR "Creation of kondemand failed\n");
				dbs_enable--;
				mutex_unlock(&dbs_mutex);
				return -ENOSPC;
			}
		}
		for_each_cpu_mask(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
			j_dbs_info->prev_cpu_wall = get_jiffies_64();
		}
		this_dbs_info->enable = 1;
		sysfs_create_group(&policy->kobj, &dbs_attr_group);
		/*
		 * Start the timer-schedule work when this governor
		 * is used for the first time
		 */
		if (dbs_enable == 1) {
			unsigned int latency;
			/* policy latency is in nS. Convert it to uS first */
			latency = policy->cpuinfo.transition_latency / 1000;
			if (latency == 0)
				latency = 1;

			def_sampling_rate = latency *
					DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;

			if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
				def_sampling_rate = MIN_STAT_SAMPLING_RATE;

			dbs_tuners_ins.sampling_rate = def_sampling_rate;
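			/*
			 * Example: a transition_latency of 10000 ns gives
			 * latency = 10 us and def_sampling_rate = 10 * 1000
			 * = 10000 us, i.e. one sample every 10 ms, unless the
			 * MIN_STAT_SAMPLING_RATE clamp above raises it.
			 */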
		}
		dbs_timer_init(policy->cpu);

		mutex_unlock(&dbs_mutex);
		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&dbs_mutex);
		dbs_timer_exit(policy->cpu);
		this_dbs_info->enable = 0;
		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
		dbs_enable--;
		if (dbs_enable == 0)
			destroy_workqueue(kondemand_wq);

		mutex_unlock(&dbs_mutex);
		break;

	case CPUFREQ_GOV_LIMITS:
		lock_cpu_hotplug();
		mutex_lock(&dbs_mutex);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
					policy->min, CPUFREQ_RELATION_L);
		mutex_unlock(&dbs_mutex);
		unlock_cpu_hotplug();
		break;
	}
	return 0;
}
static struct cpufreq_governor cpufreq_gov_dbs = {
	.name = "ondemand",
	.governor = cpufreq_governor_dbs,
	.owner = THIS_MODULE,
};
static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_dbs);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_dbs);
}
MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
		   "Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

module_init(cpufreq_gov_dbs_init);
module_exit(cpufreq_gov_dbs_exit);