/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

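/*
 * Governor tunables can be exposed either globally (one set for all policies)
 * or once per policy; return the matching sysfs attribute group.
 */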
static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
{
	if (have_governor_per_policy())
		return dbs_data->cdata->attr_group_gov_pol;
	else
		return dbs_data->cdata->attr_group_gov_sys;
}

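/*
 * Sample the load of every CPU in the policy and pass the highest value to
 * the governor-specific gov_check_cpu() callback, which decides whether to
 * change the frequency. Load is the busy share of the elapsed wall time, so
 * a 100 ms window with 25 ms of idle time yields 100 * (100 - 25) / 100 = 75.
 */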
void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
{
	struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	struct cpufreq_policy *policy = cdbs->shared->policy;
	unsigned int sampling_rate;
	unsigned int max_load = 0;
	unsigned int ignore_nice;
	unsigned int j;

	if (dbs_data->cdata->governor == GOV_ONDEMAND) {
		struct od_cpu_dbs_info_s *od_dbs_info =
				dbs_data->cdata->get_cpu_dbs_info_s(cpu);

		/*
		 * Sometimes, the ondemand governor uses an additional
		 * multiplier to give long delays. So apply this multiplier to
		 * the 'sampling_rate', so as to keep the wake-up-from-idle
		 * detection logic a bit conservative.
		 */
		sampling_rate = od_tuners->sampling_rate;
		sampling_rate *= od_dbs_info->rate_mult;

		ignore_nice = od_tuners->ignore_nice_load;
	} else {
		sampling_rate = cs_tuners->sampling_rate;
		ignore_nice = cs_tuners->ignore_nice_load;
	}

	/* Get Absolute Load */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs;
		u64 cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;
		unsigned int load;
		int io_busy = 0;

		j_cdbs = dbs_data->cdata->get_cpu_cdbs(j);

		/*
		 * For the purpose of ondemand, waiting for disk IO is
		 * an indication that you're performance critical, and
		 * not that the system is actually idle. So do not add
		 * the iowait time to the cpu idle time.
		 */
		if (dbs_data->cdata->governor == GOV_ONDEMAND)
			io_busy = od_tuners->io_is_busy;
		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);

		wall_time = (unsigned int)
			(cur_wall_time - j_cdbs->prev_cpu_wall);
		j_cdbs->prev_cpu_wall = cur_wall_time;

		idle_time = (unsigned int)
			(cur_idle_time - j_cdbs->prev_cpu_idle);
		j_cdbs->prev_cpu_idle = cur_idle_time;

		if (ignore_nice) {
			u64 cur_nice;
			unsigned long cur_nice_jiffies;

			cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
					 cdbs->prev_cpu_nice;
			/*
			 * Assumption: nice time between sampling periods will
			 * be less than 2^32 jiffies for 32 bit sys
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			cdbs->prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		/*
		 * If the CPU had gone completely idle, and a task just woke up
		 * on this CPU now, it would be unfair to calculate 'load' the
		 * usual way for this elapsed time-window, because it will show
		 * near-zero load, irrespective of how CPU intensive that task
		 * actually is. This is undesirable for latency-sensitive bursty
		 * workloads.
		 *
		 * To avoid this, we reuse the 'load' from the previous
		 * time-window and give this task a chance to start with a
		 * reasonably high CPU frequency. (However, we shouldn't over-do
		 * this copy, lest we get stuck at a high load (high frequency)
		 * for too long, even when the current system load has actually
		 * dropped down. So we perform the copy only once, upon the
		 * first wake-up from idle.)
		 *
		 * Detecting this situation is easy: the governor's deferrable
		 * timer would not have fired during CPU-idle periods. Hence
		 * an unusually large 'wall_time' (as compared to the sampling
		 * rate) indicates this scenario.
		 *
		 * prev_load can be zero in two cases and we must recalculate it
		 * for both cases:
		 * - during long idle intervals
		 * - explicitly set to zero
		 */
		if (unlikely(wall_time > (2 * sampling_rate) &&
			     j_cdbs->prev_load)) {
			load = j_cdbs->prev_load;

			/*
			 * Perform a destructive copy, to ensure that we copy
			 * the previous load only once, upon the first wake-up
			 * from idle.
			 */
			j_cdbs->prev_load = 0;
		} else {
			load = 100 * (wall_time - idle_time) / wall_time;
			j_cdbs->prev_load = load;
		}

		if (load > max_load)
			max_load = load;
	}

	dbs_data->cdata->gov_check_cpu(cpu, max_load);
}
EXPORT_SYMBOL_GPL(dbs_check_cpu);

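/*
 * Arm the deferrable sampling timer of every CPU in the policy to fire
 * 'delay' jiffies from now.
 */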
void gov_add_timers(struct cpufreq_policy *policy, unsigned int delay)
{
	struct dbs_data *dbs_data = policy->governor_data;
	struct cpu_dbs_info *cdbs;
	int cpu;

	for_each_cpu(cpu, policy->cpus) {
		cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
		cdbs->timer.expires = jiffies + delay;
		add_timer_on(&cdbs->timer, cpu);
	}
}
EXPORT_SYMBOL_GPL(gov_add_timers);

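/* Synchronously remove the sampling timers of all CPUs in the policy. */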
static inline void gov_cancel_timers(struct cpufreq_policy *policy)
{
	struct dbs_data *dbs_data = policy->governor_data;
	struct cpu_dbs_info *cdbs;
	int i;

	for_each_cpu(i, policy->cpus) {
		cdbs = dbs_data->cdata->get_cpu_cdbs(i);
		del_timer_sync(&cdbs->timer);
	}
}

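/*
 * Stop sampling for a policy: raise skip_work so dbs_timer_handler() stops
 * queuing work items, then cancel the timers and any work item that may
 * already be pending.
 */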
void gov_cancel_work(struct cpu_common_dbs_info *shared)
{
	/* Tell dbs_timer_handler() to skip queuing up work items. */
	atomic_inc(&shared->skip_work);
	/*
	 * If dbs_timer_handler() is already running, it may not notice the
	 * incremented skip_work, so wait for it to complete to prevent its work
	 * item from being queued up after the cancel_work_sync() below.
	 */
	gov_cancel_timers(shared->policy);
	/*
	 * In case dbs_timer_handler() managed to run and spawn a work item
	 * before the timers have been canceled, wait for that work item to
	 * complete and then cancel all of the timers set up by it.  If
	 * dbs_timer_handler() runs again at that point, it will see the
	 * positive value of skip_work and won't spawn any more work items.
	 */
	cancel_work_sync(&shared->work);
	gov_cancel_timers(shared->policy);
	atomic_set(&shared->skip_work, 0);
}
EXPORT_SYMBOL_GPL(gov_cancel_work);

/* Return true if the CPU load needs to be evaluated again, false otherwise */
static bool need_load_eval(struct cpu_common_dbs_info *shared,
			   unsigned int sampling_rate)
{
	if (policy_is_shared(shared->policy)) {
		ktime_t time_now = ktime_get();
		s64 delta_us = ktime_us_delta(time_now, shared->time_stamp);

		/* Do nothing if we have recently sampled */
		if (delta_us < (s64)(sampling_rate / 2))
			return false;
		else
			shared->time_stamp = time_now;
	}

	return true;
}

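/*
 * Workqueue handler: invoke the governor's gov_dbs_timer() callback in
 * process context and re-arm the per-CPU timers with the delay it returns.
 */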
static void dbs_work_handler(struct work_struct *work)
{
	struct cpu_common_dbs_info *shared = container_of(work, struct
					cpu_common_dbs_info, work);
	struct cpufreq_policy *policy;
	struct dbs_data *dbs_data;
	unsigned int sampling_rate, delay;
	bool eval_load;

	policy = shared->policy;
	dbs_data = policy->governor_data;

	/* Kill all timers */
	gov_cancel_timers(policy);

	if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;

		sampling_rate = cs_tuners->sampling_rate;
	} else {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;

		sampling_rate = od_tuners->sampling_rate;
	}

	eval_load = need_load_eval(shared, sampling_rate);

	/*
	 * Make sure cpufreq_governor_limits() isn't evaluating load in
	 * parallel.
	 */
	mutex_lock(&shared->timer_mutex);
	delay = dbs_data->cdata->gov_dbs_timer(policy, eval_load);
	mutex_unlock(&shared->timer_mutex);

	atomic_dec(&shared->skip_work);

	gov_add_timers(policy, delay);
}

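/*
 * Per-CPU (deferrable) timer callback: queue the common work item unless
 * another CPU has already done so or sampling is being stopped.
 */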
static void dbs_timer_handler(unsigned long data)
{
	struct cpu_dbs_info *cdbs = (struct cpu_dbs_info *)data;
	struct cpu_common_dbs_info *shared = cdbs->shared;

	/*
	 * Timer handler may not be allowed to queue the work at the moment,
	 * because:
	 * - Another timer handler has done that
	 * - We are stopping the governor
	 * - Or we are updating the sampling rate of the ondemand governor
	 */
	if (atomic_inc_return(&shared->skip_work) > 1)
		atomic_dec(&shared->skip_work);
	else
		queue_work(system_wq, &shared->work);
}

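/* Store the sampling rate in the tuners structure of the governor in use. */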
static void set_sampling_rate(struct dbs_data *dbs_data,
			      unsigned int sampling_rate)
{
	if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
		cs_tuners->sampling_rate = sampling_rate;
	} else {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;
		od_tuners->sampling_rate = sampling_rate;
	}
}

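/*
 * Allocate the cpu_common_dbs_info shared by all CPUs of a policy and link
 * it from each CPU's cpu_dbs_info.
 */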
static int alloc_common_dbs_info(struct cpufreq_policy *policy,
				 struct common_dbs_data *cdata)
{
	struct cpu_common_dbs_info *shared;
	int j;

	/* Allocate memory for the common information for policy->cpus */
	shared = kzalloc(sizeof(*shared), GFP_KERNEL);
	if (!shared)
		return -ENOMEM;

	/* Set shared for all CPUs, online+offline */
	for_each_cpu(j, policy->related_cpus)
		cdata->get_cpu_cdbs(j)->shared = shared;

	mutex_init(&shared->timer_mutex);
	atomic_set(&shared->skip_work, 0);
	INIT_WORK(&shared->work, dbs_work_handler);
	return 0;
}

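/* Counterpart of alloc_common_dbs_info(): unlink and free the shared data. */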
static void free_common_dbs_info(struct cpufreq_policy *policy,
				 struct common_dbs_data *cdata)
{
	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(policy->cpu);
	struct cpu_common_dbs_info *shared = cdbs->shared;
	int j;

	mutex_destroy(&shared->timer_mutex);

	for_each_cpu(j, policy->cpus)
		cdata->get_cpu_cdbs(j)->shared = NULL;

	kfree(shared);
}

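/*
 * CPUFREQ_GOV_POLICY_INIT: allocate (or reuse) the dbs_data tunables, derive
 * the initial sampling rate from the transition latency and create the
 * governor's sysfs attribute group.
 */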
static int cpufreq_governor_init(struct cpufreq_policy *policy,
				 struct dbs_data *dbs_data,
				 struct common_dbs_data *cdata)
{
	unsigned int latency;
	int ret;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	if (dbs_data) {
		if (WARN_ON(have_governor_per_policy()))
			return -EINVAL;

		ret = alloc_common_dbs_info(policy, cdata);
		if (ret)
			return ret;

		dbs_data->usage_count++;
		policy->governor_data = dbs_data;
		return 0;
	}

	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data)
		return -ENOMEM;

	ret = alloc_common_dbs_info(policy, cdata);
	if (ret)
		goto free_dbs_data;

	dbs_data->cdata = cdata;
	dbs_data->usage_count = 1;

	ret = cdata->init(dbs_data, !policy->governor->initialized);
	if (ret)
		goto free_common_dbs_info;

	/* policy latency is in ns. Convert it to us first */
	latency = policy->cpuinfo.transition_latency / 1000;
	if (latency == 0)
		latency = 1;

	/* Bring kernel and HW constraints together */
	dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
					  MIN_LATENCY_MULTIPLIER * latency);
	set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
					latency * LATENCY_MULTIPLIER));

	if (!have_governor_per_policy())
		cdata->gdbs_data = dbs_data;

	ret = sysfs_create_group(get_governor_parent_kobj(policy),
				 get_sysfs_attr(dbs_data));
	if (ret)
		goto reset_gdbs_data;

	policy->governor_data = dbs_data;

	return 0;

reset_gdbs_data:
	if (!have_governor_per_policy())
		cdata->gdbs_data = NULL;
	cdata->exit(dbs_data, !policy->governor->initialized);
free_common_dbs_info:
	free_common_dbs_info(policy, cdata);
free_dbs_data:
	kfree(dbs_data);
	return ret;
}

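/*
 * CPUFREQ_GOV_POLICY_EXIT: drop a reference to dbs_data and, when the last
 * user goes away, remove the sysfs group and free the tunables.
 */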
static int cpufreq_governor_exit(struct cpufreq_policy *policy,
				 struct dbs_data *dbs_data)
{
	struct common_dbs_data *cdata = dbs_data->cdata;
	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(policy->cpu);

	/* State should be equivalent to INIT */
	if (!cdbs->shared || cdbs->shared->policy)
		return -EBUSY;

	policy->governor_data = NULL;
	if (!--dbs_data->usage_count) {
		sysfs_remove_group(get_governor_parent_kobj(policy),
				   get_sysfs_attr(dbs_data));

		if (!have_governor_per_policy())
			cdata->gdbs_data = NULL;

		cdata->exit(dbs_data, policy->governor->initialized == 1);
		kfree(dbs_data);
	}

	free_common_dbs_info(policy, cdata);
	return 0;
}

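/*
 * CPUFREQ_GOV_START: record the initial idle/wall time stamps of each CPU,
 * set up the per-CPU deferrable timers and start sampling.
 */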
static int cpufreq_governor_start(struct cpufreq_policy *policy,
				  struct dbs_data *dbs_data)
{
	struct common_dbs_data *cdata = dbs_data->cdata;
	unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu;
	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
	struct cpu_common_dbs_info *shared = cdbs->shared;
	int io_busy = 0;

	if (!policy->cur)
		return -EINVAL;

	/* State should be equivalent to INIT */
	if (!shared || shared->policy)
		return -EBUSY;

	if (cdata->governor == GOV_CONSERVATIVE) {
		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;

		sampling_rate = cs_tuners->sampling_rate;
		ignore_nice = cs_tuners->ignore_nice_load;
	} else {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;

		sampling_rate = od_tuners->sampling_rate;
		ignore_nice = od_tuners->ignore_nice_load;
		io_busy = od_tuners->io_is_busy;
	}

	shared->policy = policy;
	shared->time_stamp = ktime_get();

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = cdata->get_cpu_cdbs(j);
		unsigned int prev_load;

		j_cdbs->prev_cpu_idle =
			get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);

		prev_load = (unsigned int)(j_cdbs->prev_cpu_wall -
					   j_cdbs->prev_cpu_idle);
		j_cdbs->prev_load = 100 * prev_load /
				    (unsigned int)j_cdbs->prev_cpu_wall;

		if (ignore_nice)
			j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

		__setup_timer(&j_cdbs->timer, dbs_timer_handler,
			      (unsigned long)j_cdbs,
			      TIMER_DEFERRABLE | TIMER_IRQSAFE);
	}

	if (cdata->governor == GOV_CONSERVATIVE) {
		struct cs_cpu_dbs_info_s *cs_dbs_info =
			cdata->get_cpu_dbs_info_s(cpu);

		cs_dbs_info->down_skip = 0;
		cs_dbs_info->requested_freq = policy->cur;
	} else {
		struct od_ops *od_ops = cdata->gov_ops;
		struct od_cpu_dbs_info_s *od_dbs_info = cdata->get_cpu_dbs_info_s(cpu);

		od_dbs_info->rate_mult = 1;
		od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
		od_ops->powersave_bias_init_cpu(cpu);
	}

	gov_add_timers(policy, delay_for_sampling_rate(sampling_rate));
	return 0;
}

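/* CPUFREQ_GOV_STOP: stop sampling and detach the policy from the shared data. */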
static int cpufreq_governor_stop(struct cpufreq_policy *policy,
				 struct dbs_data *dbs_data)
{
	struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(policy->cpu);
	struct cpu_common_dbs_info *shared = cdbs->shared;

	/* State should be equivalent to START */
	if (!shared || !shared->policy)
		return -EBUSY;

	gov_cancel_work(shared);
	shared->policy = NULL;

	return 0;
}

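/*
 * CPUFREQ_GOV_LIMITS: clamp the current frequency to the updated policy
 * limits and re-evaluate the load.
 */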
static int cpufreq_governor_limits(struct cpufreq_policy *policy,
				   struct dbs_data *dbs_data)
{
	struct common_dbs_data *cdata = dbs_data->cdata;
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);

	/* State should be equivalent to START */
	if (!cdbs->shared || !cdbs->shared->policy)
		return -EBUSY;

	mutex_lock(&cdbs->shared->timer_mutex);
	if (policy->max < cdbs->shared->policy->cur)
		__cpufreq_driver_target(cdbs->shared->policy, policy->max,
					CPUFREQ_RELATION_H);
	else if (policy->min > cdbs->shared->policy->cur)
		__cpufreq_driver_target(cdbs->shared->policy, policy->min,
					CPUFREQ_RELATION_L);
	dbs_check_cpu(dbs_data, cpu);
	mutex_unlock(&cdbs->shared->timer_mutex);

	return 0;
}

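/*
 * Common ->governor() entry point for the ondemand and conservative
 * governors, dispatched on 'event'. A governor wires it up roughly like this
 * (illustrative sketch, not part of this file):
 *
 *	static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
 *					   unsigned int event)
 *	{
 *		return cpufreq_governor_dbs(policy, &od_dbs_cdata, event);
 *	}
 */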
int cpufreq_governor_dbs(struct cpufreq_policy *policy,
			 struct common_dbs_data *cdata, unsigned int event)
{
	struct dbs_data *dbs_data;
	int ret;

	/* Lock governor to block concurrent initialization of governor */
	mutex_lock(&cdata->mutex);

	if (have_governor_per_policy())
		dbs_data = policy->governor_data;
	else
		dbs_data = cdata->gdbs_data;

	if (!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT)) {
		ret = -EINVAL;
		goto unlock;
	}

	switch (event) {
	case CPUFREQ_GOV_POLICY_INIT:
		ret = cpufreq_governor_init(policy, dbs_data, cdata);
		break;
	case CPUFREQ_GOV_POLICY_EXIT:
		ret = cpufreq_governor_exit(policy, dbs_data);
		break;
	case CPUFREQ_GOV_START:
		ret = cpufreq_governor_start(policy, dbs_data);
		break;
	case CPUFREQ_GOV_STOP:
		ret = cpufreq_governor_stop(policy, dbs_data);
		break;
	case CPUFREQ_GOV_LIMITS:
		ret = cpufreq_governor_limits(policy, dbs_data);
		break;
	default:
		ret = -EINVAL;
	}

unlock:
	mutex_unlock(&cdata->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);