/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
{
	if (have_governor_per_policy())
		return dbs_data->cdata->attr_group_gov_pol;
	else
		return dbs_data->cdata->attr_group_gov_sys;
}

void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
{
	struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	struct cpufreq_policy *policy = cdbs->shared->policy;
	unsigned int sampling_rate;
	unsigned int max_load = 0;
	unsigned int ignore_nice;
	unsigned int j;

	if (dbs_data->cdata->governor == GOV_ONDEMAND) {
		struct od_cpu_dbs_info_s *od_dbs_info =
				dbs_data->cdata->get_cpu_dbs_info_s(cpu);

		/*
		 * Sometimes, the ondemand governor uses an additional
		 * multiplier to give long delays. So apply this multiplier to
		 * the 'sampling_rate', so as to keep the wake-up-from-idle
		 * detection logic a bit conservative.
		 */
		sampling_rate = od_tuners->sampling_rate;
		sampling_rate *= od_dbs_info->rate_mult;

		ignore_nice = od_tuners->ignore_nice_load;
	} else {
		sampling_rate = cs_tuners->sampling_rate;
		ignore_nice = cs_tuners->ignore_nice_load;
	}

	/* Get Absolute Load */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs;
		u64 cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;
		unsigned int load;
		int io_busy = 0;

		j_cdbs = dbs_data->cdata->get_cpu_cdbs(j);

		/*
		 * For the purpose of ondemand, waiting for disk IO is
		 * an indication that you're performance critical, and
		 * not that the system is actually idle. So do not add
		 * the iowait time to the cpu idle time.
		 */
		if (dbs_data->cdata->governor == GOV_ONDEMAND)
			io_busy = od_tuners->io_is_busy;
		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);

		wall_time = (unsigned int)
			(cur_wall_time - j_cdbs->prev_cpu_wall);
		j_cdbs->prev_cpu_wall = cur_wall_time;

		if (cur_idle_time < j_cdbs->prev_cpu_idle)
			cur_idle_time = j_cdbs->prev_cpu_idle;

		idle_time = (unsigned int)
			(cur_idle_time - j_cdbs->prev_cpu_idle);
		j_cdbs->prev_cpu_idle = cur_idle_time;

		if (ignore_nice) {
			u64 cur_nice;
			unsigned long cur_nice_jiffies;

			cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
					 cdbs->prev_cpu_nice;
			/*
			 * Assumption: nice time between sampling periods will
			 * be less than 2^32 jiffies for 32 bit sys
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			cdbs->prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		/*
		 * If the CPU had gone completely idle, and a task just woke up
		 * on this CPU now, it would be unfair to calculate 'load' the
		 * usual way for this elapsed time-window, because it will show
		 * near-zero load, irrespective of how CPU intensive that task
		 * actually is. This is undesirable for latency-sensitive bursty
		 * workloads.
		 *
		 * To avoid this, we reuse the 'load' from the previous
		 * time-window and give this task a chance to start with a
		 * reasonably high CPU frequency. (However, we shouldn't over-do
		 * this copy, lest we get stuck at a high load (high frequency)
		 * for too long, even when the current system load has actually
		 * dropped down. So we perform the copy only once, upon the
		 * first wake-up from idle.)
		 *
		 * Detecting this situation is easy: the governor's deferrable
		 * timer would not have fired during CPU-idle periods. Hence
		 * an unusually large 'wall_time' (as compared to the sampling
		 * rate) indicates this scenario.
		 *
		 * prev_load can be zero in two cases and we must recalculate it
		 * for both cases:
		 * - during long idle intervals
		 * - explicitly set to zero
		 */
		if (unlikely(wall_time > (2 * sampling_rate) &&
			     j_cdbs->prev_load)) {
			load = j_cdbs->prev_load;

			/*
			 * Perform a destructive copy, to ensure that we copy
			 * the previous load only once, upon the first wake-up
			 * from idle.
			 */
			j_cdbs->prev_load = 0;
		} else {
			load = 100 * (wall_time - idle_time) / wall_time;
			j_cdbs->prev_load = load;
		}

		if (load > max_load)
			max_load = load;
	}

	dbs_data->cdata->gov_check_cpu(cpu, max_load);
}
EXPORT_SYMBOL_GPL(dbs_check_cpu);

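/*
 * Worked example for the load computation above (editor's illustration;
 * the numbers are assumptions, not taken from this file): if, since the
 * previous sample, wall_time = 20000 us elapsed and idle_time = 15000 us
 * of it was spent idle, then
 *
 *	load = 100 * (20000 - 15000) / 20000 = 25
 *
 * i.e. the CPU was 25% busy.  If instead the deferrable timer slept
 * through a long idle period, so that wall_time exceeds twice the
 * sampling rate and prev_load is non-zero, the stale prev_load is
 * reported once (and then cleared) so that a task waking the CPU starts
 * at a reasonable frequency instead of being treated as near-zero load.
 */
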
void gov_add_timers(struct cpufreq_policy *policy, unsigned int delay)
{
	struct dbs_data *dbs_data = policy->governor_data;
	struct cpu_dbs_info *cdbs;
	int cpu;

	for_each_cpu(cpu, policy->cpus) {
		cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
		cdbs->timer.expires = jiffies + delay;
		add_timer_on(&cdbs->timer, cpu);
	}
}
EXPORT_SYMBOL_GPL(gov_add_timers);

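/*
 * Usage note (editor's addition): gov_add_timers() arms one deferrable
 * per-CPU timer for every CPU in the policy via add_timer_on().  Within
 * this file it is called with delay_for_sampling_rate(sampling_rate)
 * when the governor starts (see cpufreq_governor_start()) and with the
 * delay returned by gov_dbs_timer() when dbs_work_handler() re-arms the
 * timers after a sample.
 */
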
static inline void gov_cancel_timers(struct cpufreq_policy *policy)
{
	struct dbs_data *dbs_data = policy->governor_data;
	struct cpu_dbs_info *cdbs;
	int i;

	for_each_cpu(i, policy->cpus) {
		cdbs = dbs_data->cdata->get_cpu_cdbs(i);
		del_timer_sync(&cdbs->timer);
	}
}

void gov_cancel_work(struct cpu_common_dbs_info *shared)
{
	/* Tell dbs_timer_handler() to skip queuing up work items. */
	atomic_inc(&shared->skip_work);
	/*
	 * If dbs_timer_handler() is already running, it may not notice the
	 * incremented skip_work, so wait for it to complete to prevent its work
	 * item from being queued up after the cancel_work_sync() below.
	 */
	gov_cancel_timers(shared->policy);
	/*
	 * In case dbs_timer_handler() managed to run and spawn a work item
	 * before the timers have been canceled, wait for that work item to
	 * complete and then cancel all of the timers set up by it. If
	 * dbs_timer_handler() runs again at that point, it will see the
	 * positive value of skip_work and won't spawn any more work items.
	 */
	cancel_work_sync(&shared->work);
	gov_cancel_timers(shared->policy);
	atomic_set(&shared->skip_work, 0);
}
EXPORT_SYMBOL_GPL(gov_cancel_work);

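/*
 * Editor's summary of the teardown ordering above: raise skip_work so no
 * new work can be queued, kill the timers, flush any work item that was
 * already queued, then kill any timers that work item may have re-armed.
 * Once gov_cancel_work() returns, no timer and no work item for this
 * policy can still be pending, and skip_work is back at 0 so a later
 * START can re-arm everything from scratch.
 */
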
/* Return true if the CPU load needs to be evaluated again, false otherwise */
static bool need_load_eval(struct cpu_common_dbs_info *shared,
			   unsigned int sampling_rate)
{
	if (policy_is_shared(shared->policy)) {
		ktime_t time_now = ktime_get();
		s64 delta_us = ktime_us_delta(time_now, shared->time_stamp);

		/* Do nothing if we recently have sampled */
		if (delta_us < (s64)(sampling_rate / 2))
			return false;
		else
			shared->time_stamp = time_now;
	}

	return true;
}

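/*
 * Example (editor's illustration, values assumed): for a shared policy
 * with sampling_rate = 20000 us, a timer that fires less than 10000 us
 * (sampling_rate / 2) after the policy-wide time_stamp returns false,
 * i.e. the load was sampled recently enough and need not be evaluated
 * again; the first caller past that threshold updates time_stamp and
 * returns true.
 */
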
static void dbs_work_handler(struct work_struct *work)
{
	struct cpu_common_dbs_info *shared = container_of(work, struct
					cpu_common_dbs_info, work);
	struct cpufreq_policy *policy;
	struct dbs_data *dbs_data;
	unsigned int sampling_rate, delay;
	bool eval_load;

	policy = shared->policy;
	dbs_data = policy->governor_data;

	/* Kill all timers */
	gov_cancel_timers(policy);

	if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;

		sampling_rate = cs_tuners->sampling_rate;
	} else {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;

		sampling_rate = od_tuners->sampling_rate;
	}

	eval_load = need_load_eval(shared, sampling_rate);

	/*
	 * Make sure cpufreq_governor_limits() isn't evaluating load in
	 * parallel.
	 */
	mutex_lock(&shared->timer_mutex);
	delay = dbs_data->cdata->gov_dbs_timer(policy, eval_load);
	mutex_unlock(&shared->timer_mutex);

	atomic_dec(&shared->skip_work);

	gov_add_timers(policy, delay);
}

static void dbs_timer_handler(unsigned long data)
{
	struct cpu_dbs_info *cdbs = (struct cpu_dbs_info *)data;
	struct cpu_common_dbs_info *shared = cdbs->shared;

	/*
	 * Timer handler may not be allowed to queue the work at the moment,
	 * because:
	 * - Another timer handler has done that
	 * - We are stopping the governor
	 * - Or we are updating the sampling rate of the ondemand governor
	 */
	if (atomic_inc_return(&shared->skip_work) > 1)
		atomic_dec(&shared->skip_work);
	else
		queue_work(system_wq, &shared->work);
}

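/*
 * Example of the skip_work gate above (editor's illustration): if the
 * per-CPU timers of two CPUs in one policy fire almost simultaneously,
 * both call atomic_inc_return(&shared->skip_work); the first sees 1 and
 * queues the work item, the second sees 2 and simply decrements again.
 * Likewise, while gov_cancel_work() keeps skip_work elevated, every
 * handler sees a value greater than 1 and backs off, so no new work can
 * be queued while the governor is being stopped or its sampling rate is
 * being updated.
 */
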
static void set_sampling_rate(struct dbs_data *dbs_data,
			      unsigned int sampling_rate)
{
	if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
		cs_tuners->sampling_rate = sampling_rate;
	} else {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;
		od_tuners->sampling_rate = sampling_rate;
	}
}

static int alloc_common_dbs_info(struct cpufreq_policy *policy,
				 struct common_dbs_data *cdata)
{
	struct cpu_common_dbs_info *shared;
	int j;

	/* Allocate memory for the common information for policy->cpus */
	shared = kzalloc(sizeof(*shared), GFP_KERNEL);
	if (!shared)
		return -ENOMEM;

	/* Set shared for all CPUs, online+offline */
	for_each_cpu(j, policy->related_cpus)
		cdata->get_cpu_cdbs(j)->shared = shared;

	mutex_init(&shared->timer_mutex);
	atomic_set(&shared->skip_work, 0);
	INIT_WORK(&shared->work, dbs_work_handler);
	return 0;
}

static void free_common_dbs_info(struct cpufreq_policy *policy,
				 struct common_dbs_data *cdata)
{
	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(policy->cpu);
	struct cpu_common_dbs_info *shared = cdbs->shared;
	int j;

	mutex_destroy(&shared->timer_mutex);

	for_each_cpu(j, policy->cpus)
		cdata->get_cpu_cdbs(j)->shared = NULL;

	kfree(shared);
}

static int cpufreq_governor_init(struct cpufreq_policy *policy,
				 struct dbs_data *dbs_data,
				 struct common_dbs_data *cdata)
{
	unsigned int latency;
	int ret;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	if (dbs_data) {
		if (WARN_ON(have_governor_per_policy()))
			return -EINVAL;

		ret = alloc_common_dbs_info(policy, cdata);
		if (ret)
			return ret;

		dbs_data->usage_count++;
		policy->governor_data = dbs_data;
		return 0;
	}

	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data)
		return -ENOMEM;

	ret = alloc_common_dbs_info(policy, cdata);
	if (ret)
		goto free_dbs_data;

	dbs_data->cdata = cdata;
	dbs_data->usage_count = 1;

	ret = cdata->init(dbs_data, !policy->governor->initialized);
	if (ret)
		goto free_common_dbs_info;

	/* policy latency is in ns. Convert it to us first */
	latency = policy->cpuinfo.transition_latency / 1000;
	if (latency == 0)
		latency = 1;

	/* Bring kernel and HW constraints together */
	dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
					  MIN_LATENCY_MULTIPLIER * latency);
	set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
					latency * LATENCY_MULTIPLIER));

	if (!have_governor_per_policy())
		cdata->gdbs_data = dbs_data;

	ret = sysfs_create_group(get_governor_parent_kobj(policy),
				 get_sysfs_attr(dbs_data));
	if (ret)
		goto reset_gdbs_data;

	policy->governor_data = dbs_data;

	return 0;

reset_gdbs_data:
	if (!have_governor_per_policy())
		cdata->gdbs_data = NULL;
	cdata->exit(dbs_data, !policy->governor->initialized);
free_common_dbs_info:
	free_common_dbs_info(policy, cdata);
free_dbs_data:
	kfree(dbs_data);
	return ret;
}

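/*
 * Example of the sampling-rate setup above (editor's illustration, the
 * latency value is assumed): a CPU reporting transition_latency =
 * 500000 ns gives latency = 500 us, so min_sampling_rate is raised to at
 * least MIN_LATENCY_MULTIPLIER * 500 and the initial sampling rate
 * becomes max(min_sampling_rate, 500 * LATENCY_MULTIPLIER); the governor
 * is thus kept from sampling faster than the hardware can actually
 * switch frequencies.
 */
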
static int cpufreq_governor_exit(struct cpufreq_policy *policy,
				 struct dbs_data *dbs_data)
{
	struct common_dbs_data *cdata = dbs_data->cdata;
	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(policy->cpu);

	/* State should be equivalent to INIT */
	if (!cdbs->shared || cdbs->shared->policy)
		return -EBUSY;

	policy->governor_data = NULL;
	if (!--dbs_data->usage_count) {
		sysfs_remove_group(get_governor_parent_kobj(policy),
				   get_sysfs_attr(dbs_data));

		if (!have_governor_per_policy())
			cdata->gdbs_data = NULL;

		cdata->exit(dbs_data, policy->governor->initialized == 1);
		kfree(dbs_data);
	}

	free_common_dbs_info(policy, cdata);
	return 0;
}

static int cpufreq_governor_start(struct cpufreq_policy *policy,
				  struct dbs_data *dbs_data)
{
	struct common_dbs_data *cdata = dbs_data->cdata;
	unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu;
	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
	struct cpu_common_dbs_info *shared = cdbs->shared;
	int io_busy = 0;

	if (!policy->cur)
		return -EINVAL;

	/* State should be equivalent to INIT */
	if (!shared || shared->policy)
		return -EBUSY;

	if (cdata->governor == GOV_CONSERVATIVE) {
		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;

		sampling_rate = cs_tuners->sampling_rate;
		ignore_nice = cs_tuners->ignore_nice_load;
	} else {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;

		sampling_rate = od_tuners->sampling_rate;
		ignore_nice = od_tuners->ignore_nice_load;
		io_busy = od_tuners->io_is_busy;
	}

	shared->policy = policy;
	shared->time_stamp = ktime_get();

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = cdata->get_cpu_cdbs(j);
		unsigned int prev_load;

		j_cdbs->prev_cpu_idle =
			get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);

		prev_load = (unsigned int)(j_cdbs->prev_cpu_wall -
					   j_cdbs->prev_cpu_idle);
		j_cdbs->prev_load = 100 * prev_load /
				    (unsigned int)j_cdbs->prev_cpu_wall;

		if (ignore_nice)
			j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

		__setup_timer(&j_cdbs->timer, dbs_timer_handler,
			      (unsigned long)j_cdbs,
			      TIMER_DEFERRABLE | TIMER_IRQSAFE);
	}

	if (cdata->governor == GOV_CONSERVATIVE) {
		struct cs_cpu_dbs_info_s *cs_dbs_info =
			cdata->get_cpu_dbs_info_s(cpu);

		cs_dbs_info->down_skip = 0;
		cs_dbs_info->requested_freq = policy->cur;
	} else {
		struct od_ops *od_ops = cdata->gov_ops;
		struct od_cpu_dbs_info_s *od_dbs_info = cdata->get_cpu_dbs_info_s(cpu);

		od_dbs_info->rate_mult = 1;
		od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
		od_ops->powersave_bias_init_cpu(cpu);
	}

	gov_add_timers(policy, delay_for_sampling_rate(sampling_rate));
	return 0;
}

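/*
 * Editor's note on the start path above: prev_load is seeded from the
 * boot-time counters, e.g. (values assumed) prev_cpu_wall = 1000000 and
 * prev_cpu_idle = 900000 give an initial prev_load of
 * 100 * (1000000 - 900000) / 1000000 = 10.  The per-CPU timers are
 * created TIMER_DEFERRABLE, so they do not wake an idle CPU; this is the
 * property the long-idle detection in dbs_check_cpu() relies on.
 */
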
static int cpufreq_governor_stop(struct cpufreq_policy *policy,
				 struct dbs_data *dbs_data)
{
	struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(policy->cpu);
	struct cpu_common_dbs_info *shared = cdbs->shared;

	/* State should be equivalent to START */
	if (!shared || !shared->policy)
		return -EBUSY;

	gov_cancel_work(shared);
	shared->policy = NULL;

	return 0;
}

static int cpufreq_governor_limits(struct cpufreq_policy *policy,
				   struct dbs_data *dbs_data)
{
	struct common_dbs_data *cdata = dbs_data->cdata;
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);

	/* State should be equivalent to START */
	if (!cdbs->shared || !cdbs->shared->policy)
		return -EBUSY;

	mutex_lock(&cdbs->shared->timer_mutex);
	if (policy->max < cdbs->shared->policy->cur)
		__cpufreq_driver_target(cdbs->shared->policy, policy->max,
					CPUFREQ_RELATION_H);
	else if (policy->min > cdbs->shared->policy->cur)
		__cpufreq_driver_target(cdbs->shared->policy, policy->min,
					CPUFREQ_RELATION_L);
	dbs_check_cpu(dbs_data, cpu);
	mutex_unlock(&cdbs->shared->timer_mutex);

	return 0;
}

int cpufreq_governor_dbs(struct cpufreq_policy *policy,
			 struct common_dbs_data *cdata, unsigned int event)
{
	struct dbs_data *dbs_data;
	int ret;

	/* Lock governor to block concurrent initialization of governor */
	mutex_lock(&cdata->mutex);

	if (have_governor_per_policy())
		dbs_data = policy->governor_data;
	else
		dbs_data = cdata->gdbs_data;

	if (!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT)) {
		ret = -EINVAL;
		goto unlock;
	}

	switch (event) {
	case CPUFREQ_GOV_POLICY_INIT:
		ret = cpufreq_governor_init(policy, dbs_data, cdata);
		break;
	case CPUFREQ_GOV_POLICY_EXIT:
		ret = cpufreq_governor_exit(policy, dbs_data);
		break;
	case CPUFREQ_GOV_START:
		ret = cpufreq_governor_start(policy, dbs_data);
		break;
	case CPUFREQ_GOV_STOP:
		ret = cpufreq_governor_stop(policy, dbs_data);
		break;
	case CPUFREQ_GOV_LIMITS:
		ret = cpufreq_governor_limits(policy, dbs_data);
		break;
	default:
		ret = -EINVAL;
	}

unlock:
	mutex_unlock(&cdata->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);
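/*
 * How a governor plugs into this common code (editor's sketch, modelled
 * on the ondemand governor; od_dbs_cdata and cpufreq_gov_ondemand live in
 * cpufreq_ondemand.c and are shown here only for illustration):
 *
 *	static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
 *					   unsigned int event)
 *	{
 *		return cpufreq_governor_dbs(policy, &od_dbs_cdata, event);
 *	}
 *
 *	struct cpufreq_governor cpufreq_gov_ondemand = {
 *		.name		= "ondemand",
 *		.governor	= od_cpufreq_governor_dbs,
 *		.owner		= THIS_MODULE,
 *	};
 *
 * The cpufreq core then invokes that ->governor() callback with the
 * POLICY_INIT/EXIT, START, STOP and LIMITS events handled by the switch
 * statement above.
 */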