/*
 * drivers/cpufreq/cpufreq_ondemand.c
 *
 * Copyright (C) 2001 Russell King
 * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 * Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/tick.h>
#include "cpufreq_governor.h"

/* On-demand governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(100000)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)

static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);

static struct od_ops od_ops;

static unsigned int default_powersave_bias;

static void ondemand_powersave_bias_init_cpu(int cpu)
{
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

	dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
	dbs_info->freq_lo = 0;
}

/*
 * Not all CPUs want IO time to be accounted as busy; this depends on how
 * efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and old
 * Intel systems.
 * Mike Chan (android.com) claims this is also not true for ARM.
 * Because of this, whitelist specific known CPU series by default, and
 * leave all others up to the user.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
	/*
	 * For Intel, Core 2 (model 15) and later have an efficient idle.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
			boot_cpu_data.x86 == 6 &&
			boot_cpu_data.x86_model >= 15)
		return 1;
#endif
	return 0;
}

/*
 * Find the right frequency to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now, and sets freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in the percpu area for averaging freqs.
 */
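/*
 * Worked example (hypothetical numbers): with powersave_bias = 100 (i.e.
 * 10%) and freq_next resolving to freq_req = 2000000 kHz, freq_reduc is
 * 200000 kHz and freq_avg = 1800000 kHz.  If the frequency table only
 * contains 1600000 kHz and 2000000 kHz, freq_lo and freq_hi snap to those
 * entries, and jiffies_hi = (1800000 - 1600000) * jiffies_total /
 * (2000000 - 1600000), so half of each sample period is spent at either
 * frequency, averaging out to roughly freq_avg.
 */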
static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
		unsigned int freq_next, unsigned int relation)
{
	unsigned int freq_req, freq_reduc, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index = 0;
	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
			policy->cpu);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	if (!dbs_info->freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_next;
	}

	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
			relation, &index);
	freq_req = dbs_info->freq_table[index].frequency;
	freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_H, &index);
	freq_lo = dbs_info->freq_table[index].frequency;
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_L, &index);
	freq_hi = dbs_info->freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_lo;
	}
	jiffies_total = usecs_to_jiffies(dbs_data->sampling_rate);
	jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
	jiffies_hi += ((freq_hi - freq_lo) / 2);
	jiffies_hi /= (freq_hi - freq_lo);
	jiffies_lo = jiffies_total - jiffies_hi;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_jiffies = jiffies_lo;
	dbs_info->freq_hi_jiffies = jiffies_hi;
	return freq_hi;
}

static void ondemand_powersave_bias_init(void)
{
	int i;

	for_each_online_cpu(i) {
		ondemand_powersave_bias_init_cpu(i);
	}
}

static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	if (od_tuners->powersave_bias)
		freq = od_ops.powersave_bias_target(policy, freq,
				CPUFREQ_RELATION_H);
	else if (policy->cur == policy->max)
		return;

	__cpufreq_driver_target(policy, freq, od_tuners->powersave_bias ?
			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}

/*
 * Every sampling_rate we check whether the current idle time is less than
 * 20% (default). If it is, we try to increase the frequency; otherwise we
 * adjust the frequency proportionally to the load.
 */
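/*
 * Example (hypothetical values): with cpuinfo.min_freq = 800000 kHz,
 * cpuinfo.max_freq = 2000000 kHz and a measured load of 50, freq_next
 * below becomes 800000 + 50 * (2000000 - 800000) / 100 = 1400000 kHz.
 */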
static void od_check_cpu(int cpu, unsigned int load)
{
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
	struct policy_dbs_info *policy_dbs = dbs_info->cdbs.policy_dbs;
	struct cpufreq_policy *policy = policy_dbs->policy;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	dbs_info->freq_lo = 0;

	/* Check for frequency increase */
	if (load > dbs_data->up_threshold) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			dbs_info->rate_mult = dbs_data->sampling_down_factor;
		dbs_freq_increase(policy, policy->max);
	} else {
		/* Calculate the next frequency proportional to load */
		unsigned int freq_next, min_f, max_f;

		min_f = policy->cpuinfo.min_freq;
		max_f = policy->cpuinfo.max_freq;
		freq_next = min_f + load * (max_f - min_f) / 100;

		/* No longer fully busy, reset rate_mult */
		dbs_info->rate_mult = 1;

		if (!od_tuners->powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_C);
			return;
		}

		freq_next = od_ops.powersave_bias_target(policy, freq_next,
				CPUFREQ_RELATION_L);
		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_C);
	}
}

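/*
 * When powersave_bias is in effect, a sample period is split in two: a
 * NORMAL_SAMPLE interval spent at freq_hi, followed by a SUB_SAMPLE
 * interval spent at freq_lo, using the lengths computed by the
 * powersave_bias target routine above.
 */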
static unsigned int od_dbs_timer(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu);
	int delay = 0, sample_type = dbs_info->sample_type;

	/* Common NORMAL_SAMPLE setup */
	dbs_info->sample_type = OD_NORMAL_SAMPLE;
	if (sample_type == OD_SUB_SAMPLE) {
		delay = dbs_info->freq_lo_jiffies;
		__cpufreq_driver_target(policy, dbs_info->freq_lo,
					CPUFREQ_RELATION_H);
	} else {
		dbs_check_cpu(policy);
		if (dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			dbs_info->sample_type = OD_SUB_SAMPLE;
			delay = dbs_info->freq_hi_jiffies;
		}
	}

	if (!delay)
		delay = delay_for_sampling_rate(dbs_data->sampling_rate
						* dbs_info->rate_mult);

	return delay;
}

/************************** sysfs interface ************************/
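
/*
 * The tunables below are typically exposed under
 * /sys/devices/system/cpu/cpufreq/ondemand/ (the exact location depends
 * on the kernel configuration). For example, raising the up threshold
 * from the shell:
 *
 *	echo 95 > /sys/devices/system/cpu/cpufreq/ondemand/up_threshold
 */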
static struct dbs_governor od_dbs_gov;

static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	od_tuners->io_is_busy = !!input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
				j);
		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
			&dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
	}
	return count;
}

static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD)
		return -EINVAL;

	dbs_data->up_threshold = input;
	return count;
}

static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
		const char *buf, size_t count)
{
	unsigned int input, j;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;
	dbs_data->sampling_down_factor = input;

	/* Reset down sampling multiplier in case it was active */
	for_each_online_cpu(j) {
		struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
				j);
		dbs_info->rate_mult = 1;
	}
	return count;
}

static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
		const char *buf, size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	unsigned int j;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == dbs_data->ignore_nice_load) /* nothing to do */
		return count;

	dbs_data->ignore_nice_load = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct od_cpu_dbs_info_s *dbs_info;

		dbs_info = &per_cpu(od_cpu_dbs_info, j);
		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
			&dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
		if (dbs_data->ignore_nice_load)
			dbs_info->cdbs.prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}
	return count;
}

static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1000)
		input = 1000;

	od_tuners->powersave_bias = input;
	ondemand_powersave_bias_init();
	return count;
}

gov_show_one_common(sampling_rate);
gov_show_one_common(up_threshold);
gov_show_one_common(sampling_down_factor);
gov_show_one_common(ignore_nice_load);
gov_show_one_common(min_sampling_rate);
gov_show_one(od, io_is_busy);
gov_show_one(od, powersave_bias);

gov_attr_rw(sampling_rate);
gov_attr_rw(io_is_busy);
gov_attr_rw(up_threshold);
gov_attr_rw(sampling_down_factor);
gov_attr_rw(ignore_nice_load);
gov_attr_rw(powersave_bias);
gov_attr_ro(min_sampling_rate);

static struct attribute *od_attributes[] = {
	&min_sampling_rate.attr,
	&sampling_rate.attr,
	&up_threshold.attr,
	&sampling_down_factor.attr,
	&ignore_nice_load.attr,
	&powersave_bias.attr,
	&io_is_busy.attr,
	NULL
};

/************************** sysfs end ************************/

static int od_init(struct dbs_data *dbs_data, bool notify)
{
	struct od_dbs_tuners *tuners;
	u64 idle_time;
	int cpu;

	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
	if (!tuners) {
		pr_err("%s: kzalloc failed\n", __func__);
		return -ENOMEM;
	}

	cpu = get_cpu();
	idle_time = get_cpu_idle_time_us(cpu, NULL);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		dbs_data->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		/*
		 * In nohz/micro accounting case we set the minimum frequency
		 * not depending on HZ, but fixed (very low). The deferred
		 * timer might skip some samples if idle/sleeping as needed.
		 */
		dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
	} else {
		dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;

		/* For correct statistics, we need 10 ticks for each measure */
		dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
			jiffies_to_usecs(10);
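		/*
		 * Example (assuming MIN_SAMPLING_RATE_RATIO == 2, as defined
		 * in cpufreq_governor.h, and HZ == 250): ten ticks are
		 * 40000 us, so min_sampling_rate works out to 80000 us.
		 */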
	}

	dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	dbs_data->ignore_nice_load = 0;
	tuners->powersave_bias = default_powersave_bias;
	tuners->io_is_busy = should_io_be_busy();

	dbs_data->tuners = tuners;
	return 0;
}

static void od_exit(struct dbs_data *dbs_data, bool notify)
{
	kfree(dbs_data->tuners);
}

define_get_cpu_dbs_routines(od_cpu_dbs_info);

static struct od_ops od_ops = {
	.powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
	.powersave_bias_target = generic_powersave_bias_target,
	.freq_increase = dbs_freq_increase,
};

static struct dbs_governor od_dbs_gov = {
	.gov = {
		.name = "ondemand",
		.governor = cpufreq_governor_dbs,
		.max_transition_latency = TRANSITION_LATENCY_LIMIT,
		.owner = THIS_MODULE,
	},
	.governor = GOV_ONDEMAND,
	.kobj_type = { .default_attrs = od_attributes },
	.get_cpu_cdbs = get_cpu_cdbs,
	.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
	.gov_dbs_timer = od_dbs_timer,
	.gov_check_cpu = od_check_cpu,
	.gov_ops = &od_ops,
	.init = od_init,
	.exit = od_exit,
};

#define CPU_FREQ_GOV_ONDEMAND	(&od_dbs_gov.gov)

static void od_set_powersave_bias(unsigned int powersave_bias)
{
	struct cpufreq_policy *policy;
	struct dbs_data *dbs_data;
	struct od_dbs_tuners *od_tuners;
	unsigned int cpu;
	cpumask_t done;

	default_powersave_bias = powersave_bias;
	cpumask_clear(&done);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct policy_dbs_info *policy_dbs;

		if (cpumask_test_cpu(cpu, &done))
			continue;

		policy_dbs = per_cpu(od_cpu_dbs_info, cpu).cdbs.policy_dbs;
		if (!policy_dbs)
			continue;

		policy = policy_dbs->policy;
		cpumask_or(&done, &done, policy->cpus);

		if (policy->governor != CPU_FREQ_GOV_ONDEMAND)
			continue;

		dbs_data = policy_dbs->dbs_data;
		od_tuners = dbs_data->tuners;
		od_tuners->powersave_bias = default_powersave_bias;
	}
	put_online_cpus();
}

void od_register_powersave_bias_handler(unsigned int (*f)
		(struct cpufreq_policy *, unsigned int, unsigned int),
		unsigned int powersave_bias)
{
	od_ops.powersave_bias_target = f;
	od_set_powersave_bias(powersave_bias);
}
EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);

void od_unregister_powersave_bias_handler(void)
{
	od_ops.powersave_bias_target = generic_powersave_bias_target;
	od_set_powersave_bias(0);
}
EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
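
/*
 * Usage sketch (hypothetical caller, not part of this file): a platform
 * driver can install its own bias target, here a trivial pass-through,
 * and restore the default later:
 *
 *	static unsigned int my_bias_target(struct cpufreq_policy *policy,
 *			unsigned int freq_next, unsigned int relation)
 *	{
 *		return freq_next;
 *	}
 *
 *	od_register_powersave_bias_handler(my_bias_target, 200);
 *	od_unregister_powersave_bias_handler();
 */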

static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(CPU_FREQ_GOV_ONDEMAND);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(CPU_FREQ_GOV_ONDEMAND);
}

MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
	"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return CPU_FREQ_GOV_ONDEMAND;
}

fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);