/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *           (C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *           (C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *           (c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);

static DEFINE_MUTEX(gov_dbs_data_mutex);
/* Common sysfs tunables */
/**
 * store_sampling_rate - update sampling rate effective immediately if needed.
 *
 * If the new rate is smaller than the old one, simply updating
 * dbs_data->sampling_rate might not be appropriate.  For example, suppose the
 * original sampling_rate was 1 second and the user requests a new rate of
 * 10 ms because they need an immediate reaction from the ondemand governor,
 * without being sure whether a higher frequency will be required.  If the
 * governor only picked up the new value at the next sample, the change could
 * take effect up to 1 second too late.  Thus, when reducing the sampling
 * rate, we need to make the new value effective immediately.
 *
 * This must be called with dbs_data->mutex held, otherwise traversing
 * policy_dbs_list isn't safe.
 */
ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
                            size_t count)
{
        struct dbs_data *dbs_data = to_dbs_data(attr_set);
        struct policy_dbs_info *policy_dbs;
        unsigned int rate;
        int ret;

        ret = sscanf(buf, "%u", &rate);
        if (ret != 1)
                return -EINVAL;

        dbs_data->sampling_rate = max(rate, dbs_data->min_sampling_rate);

        /*
         * We are operating under dbs_data->mutex and so the list and its
         * entries can't be freed concurrently.
         */
        list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
                mutex_lock(&policy_dbs->timer_mutex);
                /*
                 * On 32-bit architectures this may race with the
                 * sample_delay_ns read in dbs_update_util_handler(), but that
                 * really doesn't matter.  If the read returns a value that's
                 * too big, the sample will be skipped, but the next invocation
                 * of dbs_update_util_handler() (when the update has been
                 * completed) will take a sample.
                 *
                 * If this runs in parallel with dbs_work_handler(), we may end
                 * up overwriting the sample_delay_ns value that it has just
                 * written, but it will be corrected next time a sample is
                 * taken, so it shouldn't be significant.
                 */
                gov_update_sample_delay(policy_dbs, 0);
                mutex_unlock(&policy_dbs->timer_mutex);
        }

        return count;
}
EXPORT_SYMBOL_GPL(store_sampling_rate);

/**
 * gov_update_cpu_data - Update CPU load data.
 * @dbs_data: Top-level governor data pointer.
 *
 * Update CPU load data for all CPUs in the domain governed by @dbs_data
 * (that may be a single policy or a bunch of them if governor tunables are
 * system-wide).
 *
 * Call under the @dbs_data mutex.
 */
void gov_update_cpu_data(struct dbs_data *dbs_data)
{
        struct policy_dbs_info *policy_dbs;

        list_for_each_entry(policy_dbs, &dbs_data->attr_set.policy_list, list) {
                unsigned int j;

                for_each_cpu(j, policy_dbs->policy->cpus) {
                        struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

                        j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time,
                                                                  dbs_data->io_is_busy);
                        if (dbs_data->ignore_nice_load)
                                j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
                }
        }
}
EXPORT_SYMBOL_GPL(gov_update_cpu_data);

unsigned int dbs_update(struct cpufreq_policy *policy)
{
        struct policy_dbs_info *policy_dbs = policy->governor_data;
        struct dbs_data *dbs_data = policy_dbs->dbs_data;
        unsigned int ignore_nice = dbs_data->ignore_nice_load;
        unsigned int max_load = 0;
        unsigned int sampling_rate, io_busy, j;

        /*
         * Sometimes governors may use an additional multiplier to increase
         * sample delays temporarily.  Apply that multiplier to sampling_rate
         * so as to keep the wake-up-from-idle detection logic a bit
         * conservative.
         */
        sampling_rate = dbs_data->sampling_rate * policy_dbs->rate_mult;
        /*
         * For the purpose of ondemand, waiting for disk IO is an indication
         * that you're performance critical, and not that the system is
         * actually idle, so do not add the iowait time to the CPU idle time
         * then.
         */
        io_busy = dbs_data->io_is_busy;

        /* Get Absolute Load */
        for_each_cpu(j, policy->cpus) {
                struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
                u64 update_time, cur_idle_time;
                unsigned int idle_time, time_elapsed;
                unsigned int load;

                cur_idle_time = get_cpu_idle_time(j, &update_time, io_busy);

                time_elapsed = update_time - j_cdbs->prev_update_time;
                j_cdbs->prev_update_time = update_time;

                idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
                j_cdbs->prev_cpu_idle = cur_idle_time;

                if (ignore_nice) {
                        u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

                        idle_time += cputime_to_usecs(cur_nice - j_cdbs->prev_cpu_nice);
                        j_cdbs->prev_cpu_nice = cur_nice;
                }

159 | if (unlikely(!time_elapsed)) { |
160 | /* | |
161 | * That can only happen when this function is called | |
162 | * twice in a row with a very short interval between the | |
163 | * calls, so the previous load value can be used then. | |
164 | */ | |
18b46abd | 165 | load = j_cdbs->prev_load; |
9485e4ca RW |
166 | } else if (unlikely(time_elapsed > 2 * sampling_rate && |
167 | j_cdbs->prev_load)) { | |
c8ae481b | 168 | /* |
9485e4ca RW |
169 | * If the CPU had gone completely idle and a task has |
170 | * just woken up on this CPU now, it would be unfair to | |
171 | * calculate 'load' the usual way for this elapsed | |
172 | * time-window, because it would show near-zero load, | |
173 | * irrespective of how CPU intensive that task actually | |
174 | * was. This is undesirable for latency-sensitive bursty | |
175 | * workloads. | |
176 | * | |
177 | * To avoid this, reuse the 'load' from the previous | |
178 | * time-window and give this task a chance to start with | |
179 | * a reasonably high CPU frequency. However, that | |
180 | * shouldn't be over-done, lest we get stuck at a high | |
181 | * load (high frequency) for too long, even when the | |
182 | * current system load has actually dropped down, so | |
183 | * clear prev_load to guarantee that the load will be | |
184 | * computed again next time. | |
185 | * | |
186 | * Detecting this situation is easy: the governor's | |
187 | * utilization update handler would not have run during | |
188 | * CPU-idle periods. Hence, an unusually large | |
189 | * 'time_elapsed' (as compared to the sampling rate) | |
190 | * indicates this scenario. | |
c8ae481b | 191 | */ |
9485e4ca | 192 | load = j_cdbs->prev_load; |
c8ae481b | 193 | j_cdbs->prev_load = 0; |
18b46abd | 194 | } else { |
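                        /*
                         * Common case: 'load' is the busy fraction of the
                         * elapsed window, in percent; e.g. time_elapsed =
                         * 10000 us with idle_time = 2500 us yields
                         * 100 * 7500 / 10000 = 75.
                         */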
                        if (time_elapsed >= idle_time) {
                                load = 100 * (time_elapsed - idle_time) / time_elapsed;
                        } else {
                                /*
                                 * That can happen if idle_time is returned by
                                 * get_cpu_idle_time_jiffy().  In that case
                                 * idle_time is roughly equal to the difference
                                 * between time_elapsed and "busy time" obtained
                                 * from CPU statistics.  Then, the "busy time"
                                 * can end up being greater than time_elapsed
                                 * (for example, if jiffies_64 and the CPU
                                 * statistics are updated by different CPUs),
                                 * so idle_time may in fact be negative.  That
                                 * means, though, that the CPU was busy all
                                 * the time (on the rough average) during the
                                 * last sampling interval and 100 can be
                                 * returned as the load.
                                 */
                                load = (int)idle_time < 0 ? 100 : 0;
                        }
                        j_cdbs->prev_load = load;
                }

                if (load > max_load)
                        max_load = load;
        }
        return max_load;
}
EXPORT_SYMBOL_GPL(dbs_update);

static void dbs_work_handler(struct work_struct *work)
{
        struct policy_dbs_info *policy_dbs;
        struct cpufreq_policy *policy;
        struct dbs_governor *gov;

        policy_dbs = container_of(work, struct policy_dbs_info, work);
        policy = policy_dbs->policy;
        gov = dbs_governor_of(policy);

        /*
         * Make sure cpufreq_governor_limits() isn't evaluating load or the
         * ondemand governor isn't updating the sampling rate in parallel.
         */
        mutex_lock(&policy_dbs->timer_mutex);
        gov_update_sample_delay(policy_dbs, gov->gov_dbs_timer(policy));
        mutex_unlock(&policy_dbs->timer_mutex);

        /* Allow the utilization update handler to queue up more work. */
        atomic_set(&policy_dbs->work_count, 0);
        /*
         * If the update below is reordered with respect to the sample delay
         * modification, the utilization update handler may end up using a
         * stale sample delay value.
         */
        smp_wmb();
        policy_dbs->work_in_progress = false;
}

static void dbs_irq_work(struct irq_work *irq_work)
{
        struct policy_dbs_info *policy_dbs;

        policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work);
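        /* Keep the work on the CPU whose utilization update raised it. */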
        schedule_work_on(smp_processor_id(), &policy_dbs->work);
}

static void dbs_update_util_handler(struct update_util_data *data, u64 time,
                                    unsigned long util, unsigned long max)
{
        struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
        struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
        u64 delta_ns, lst;

        /*
         * The work may not be allowed to be queued up right now.
         * Possible reasons:
         * - Work has already been queued up or is in progress.
         * - It is too early (too little time from the previous sample).
         */
        if (policy_dbs->work_in_progress)
                return;

        /*
         * If the reads below are reordered before the check above, the value
         * of sample_delay_ns used in the computation may be stale.
         */
        smp_rmb();
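        /* The barrier above pairs with the smp_wmb() in dbs_work_handler(). */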
        lst = READ_ONCE(policy_dbs->last_sample_time);
        delta_ns = time - lst;
        if ((s64)delta_ns < policy_dbs->sample_delay_ns)
                return;

        /*
         * If the policy is not shared, the irq_work may be queued up right away
         * at this point.  Otherwise, we need to ensure that only one of the
         * CPUs sharing the policy will do that.
         */
        if (policy_dbs->is_shared) {
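                /*
                 * atomic_add_unless() increments work_count only if it is
                 * still zero, so exactly one of the CPUs sharing the policy
                 * gets past this point; the others bail out.
                 */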
                if (!atomic_add_unless(&policy_dbs->work_count, 1, 1))
                        return;

                /*
                 * If another CPU updated last_sample_time in the meantime, we
                 * shouldn't be here, so clear the work counter and bail out.
                 */
                if (unlikely(lst != READ_ONCE(policy_dbs->last_sample_time))) {
                        atomic_set(&policy_dbs->work_count, 0);
                        return;
                }
        }

        policy_dbs->last_sample_time = time;
        policy_dbs->work_in_progress = true;
        irq_work_queue(&policy_dbs->irq_work);
}

static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
                                unsigned int delay_us)
{
        struct cpufreq_policy *policy = policy_dbs->policy;
        int cpu;

        gov_update_sample_delay(policy_dbs, delay_us);
        policy_dbs->last_sample_time = 0;
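        /*
         * Clearing last_sample_time makes delta_ns in
         * dbs_update_util_handler() exceed any sample delay, so the first
         * utilization update after this point takes a sample right away.
         */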

        for_each_cpu(cpu, policy->cpus) {
                struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);

                cpufreq_add_update_util_hook(cpu, &cdbs->update_util,
                                             dbs_update_util_handler);
        }
}

static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
        int i;

        for_each_cpu(i, policy->cpus)
                cpufreq_remove_update_util_hook(i);

        synchronize_sched();
}

static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
                                                     struct dbs_governor *gov)
{
        struct policy_dbs_info *policy_dbs;
        int j;

        /* Allocate memory for per-policy governor data. */
        policy_dbs = gov->alloc();
        if (!policy_dbs)
                return NULL;

        policy_dbs->policy = policy;
        mutex_init(&policy_dbs->timer_mutex);
        atomic_set(&policy_dbs->work_count, 0);
        init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
        INIT_WORK(&policy_dbs->work, dbs_work_handler);

        /* Set policy_dbs for all CPUs, online+offline */
        for_each_cpu(j, policy->related_cpus) {
                struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

                j_cdbs->policy_dbs = policy_dbs;
        }
        return policy_dbs;
}

static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs,
                                 struct dbs_governor *gov)
{
        int j;

        mutex_destroy(&policy_dbs->timer_mutex);

        for_each_cpu(j, policy_dbs->policy->related_cpus) {
                struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

                j_cdbs->policy_dbs = NULL;
                j_cdbs->update_util.func = NULL;
        }
        gov->free(policy_dbs);
}

int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
{
        struct dbs_governor *gov = dbs_governor_of(policy);
        struct dbs_data *dbs_data;
        struct policy_dbs_info *policy_dbs;
        unsigned int latency;
        int ret = 0;

        /* State should be equivalent to EXIT */
        if (policy->governor_data)
                return -EBUSY;

        policy_dbs = alloc_policy_dbs_info(policy, gov);
        if (!policy_dbs)
                return -ENOMEM;

        /* Protect gov->gdbs_data against concurrent updates. */
        mutex_lock(&gov_dbs_data_mutex);

        dbs_data = gov->gdbs_data;
        if (dbs_data) {
                if (WARN_ON(have_governor_per_policy())) {
                        ret = -EINVAL;
                        goto free_policy_dbs_info;
                }
                policy_dbs->dbs_data = dbs_data;
                policy->governor_data = policy_dbs;

                gov_attr_set_get(&dbs_data->attr_set, &policy_dbs->list);
                goto out;
        }

        dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
        if (!dbs_data) {
                ret = -ENOMEM;
                goto free_policy_dbs_info;
        }

        gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list);

        ret = gov->init(dbs_data);
        if (ret)
                goto free_policy_dbs_info;

        /* policy latency is in ns. Convert it to us first */
        latency = policy->cpuinfo.transition_latency / 1000;
        if (latency == 0)
                latency = 1;

        /* Bring kernel and HW constraints together */
        dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
                                          MIN_LATENCY_MULTIPLIER * latency);
        dbs_data->sampling_rate = max(dbs_data->min_sampling_rate,
                                      LATENCY_MULTIPLIER * latency);
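        /*
         * E.g. a transition_latency of 500000 ns gives latency = 500 us, so
         * the effective sampling rate is at least LATENCY_MULTIPLIER * 500 us
         * (subject to the min_sampling_rate floor computed above).
         */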

        if (!have_governor_per_policy())
                gov->gdbs_data = dbs_data;

        policy_dbs->dbs_data = dbs_data;
        policy->governor_data = policy_dbs;

        gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
        ret = kobject_init_and_add(&dbs_data->attr_set.kobj, &gov->kobj_type,
                                   get_governor_parent_kobj(policy),
                                   "%s", gov->gov.name);
        if (!ret)
                goto out;

        /* Failure, so roll back. */
        pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);

        policy->governor_data = NULL;

        if (!have_governor_per_policy())
                gov->gdbs_data = NULL;
        gov->exit(dbs_data);
        kfree(dbs_data);

free_policy_dbs_info:
        free_policy_dbs_info(policy_dbs, gov);

out:
        mutex_unlock(&gov_dbs_data_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_init);

void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy)
{
        struct dbs_governor *gov = dbs_governor_of(policy);
        struct policy_dbs_info *policy_dbs = policy->governor_data;
        struct dbs_data *dbs_data = policy_dbs->dbs_data;
        unsigned int count;

        /* Protect gov->gdbs_data against concurrent updates. */
        mutex_lock(&gov_dbs_data_mutex);

        count = gov_attr_set_put(&dbs_data->attr_set, &policy_dbs->list);

        policy->governor_data = NULL;

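        /* The last user of the attribute set frees the governor tunables. */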
        if (!count) {
                if (!have_governor_per_policy())
                        gov->gdbs_data = NULL;

                gov->exit(dbs_data);
                kfree(dbs_data);
        }

        free_policy_dbs_info(policy_dbs, gov);

        mutex_unlock(&gov_dbs_data_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_exit);

int cpufreq_dbs_governor_start(struct cpufreq_policy *policy)
{
        struct dbs_governor *gov = dbs_governor_of(policy);
        struct policy_dbs_info *policy_dbs = policy->governor_data;
        struct dbs_data *dbs_data = policy_dbs->dbs_data;
        unsigned int sampling_rate, ignore_nice, j;
        unsigned int io_busy;

        if (!policy->cur)
                return -EINVAL;

        policy_dbs->is_shared = policy_is_shared(policy);
        policy_dbs->rate_mult = 1;

        sampling_rate = dbs_data->sampling_rate;
        ignore_nice = dbs_data->ignore_nice_load;
        io_busy = dbs_data->io_is_busy;

        for_each_cpu(j, policy->cpus) {
                struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

                j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time, io_busy);
                /*
                 * Make the first invocation of dbs_update() compute the load.
                 */
                j_cdbs->prev_load = 0;

                if (ignore_nice)
                        j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
        }

        gov->start(policy);

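        /*
         * Install the utilization update hooks only now, after all of the
         * state they rely on has been initialized above.
         */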
        gov_set_update_util(policy_dbs, sampling_rate);
        return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_start);

void cpufreq_dbs_governor_stop(struct cpufreq_policy *policy)
{
        struct policy_dbs_info *policy_dbs = policy->governor_data;

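        /*
         * Detach the utilization update hooks first (gov_clear_update_util()
         * also waits for handlers already in flight), then flush the irq_work
         * and the work item they may have queued.
         */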
        gov_clear_update_util(policy_dbs->policy);
        irq_work_sync(&policy_dbs->irq_work);
        cancel_work_sync(&policy_dbs->work);
        atomic_set(&policy_dbs->work_count, 0);
        policy_dbs->work_in_progress = false;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop);

void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy)
{
        struct policy_dbs_info *policy_dbs = policy->governor_data;

        mutex_lock(&policy_dbs->timer_mutex);
        cpufreq_policy_apply_limits(policy);
        gov_update_sample_delay(policy_dbs, 0);

        mutex_unlock(&policy_dbs->timer_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits);