/*
 * ARM big.LITTLE Platforms CPUFreq support
 *
 * Copyright (C) 2013 ARM Ltd.
 * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
 *
 * Copyright (C) 2013 Linaro.
 * Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_platform.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/topology.h>
#include <linux/types.h>
#include <asm/bL_switcher.h>

#include "arm_big_little.h"

/* Currently we support only two clusters */
#define A15_CLUSTER	0
#define A7_CLUSTER	1
#define MAX_CLUSTERS	2

#ifdef CONFIG_BL_SWITCHER
static bool bL_switching_enabled;
#define is_bL_switching_enabled()	bL_switching_enabled
#define set_switching_enabled(x)	(bL_switching_enabled = (x))
#else
#define is_bL_switching_enabled()	false
#define set_switching_enabled(x)	do { } while (0)
#endif

#define ACTUAL_FREQ(cluster, freq)  ((cluster == A7_CLUSTER) ? freq << 1 : freq)
#define VIRT_FREQ(cluster, freq)    ((cluster == A7_CLUSTER) ? freq >> 1 : freq)
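
/*
 * With the switcher enabled the driver exposes one virtual frequency table
 * in which the A7 (LITTLE) cluster is modelled as running at half the speed
 * of the A15 (big) cluster. The macros above convert between the two domains
 * for the A7: a virtual request of 500000 kHz maps to an actual clock rate
 * of 1000000 kHz (ACTUAL_FREQ), and a physical rate of 1000000 kHz is
 * reported back as 500000 kHz (VIRT_FREQ). A15 frequencies pass through
 * unchanged.
 */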

static struct cpufreq_arm_bL_ops *arm_bL_ops;
static struct clk *clk[MAX_CLUSTERS];
static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
static atomic_t cluster_usage[MAX_CLUSTERS + 1];

static unsigned int clk_big_min;	/* Minimum clock frequency (big) */
static unsigned int clk_little_max;	/* Maximum clock frequency (LITTLE) */

static DEFINE_PER_CPU(unsigned int, physical_cluster);
static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq);

static struct mutex cluster_lock[MAX_CLUSTERS];

static inline int raw_cpu_to_cluster(int cpu)
{
	return topology_physical_package_id(cpu);
}

static inline int cpu_to_cluster(int cpu)
{
	return is_bL_switching_enabled() ?
		MAX_CLUSTERS : raw_cpu_to_cluster(cpu);
}
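
/*
 * With the switcher enabled every CPU is assigned to a single "virtual"
 * cluster (index MAX_CLUSTERS) whose frequency table merges both physical
 * clusters; the physical cluster actually backing each CPU is tracked
 * separately in the physical_cluster per-CPU variable.
 */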

static unsigned int find_cluster_maxfreq(int cluster)
{
	int j;
	u32 max_freq = 0, cpu_freq;

	for_each_online_cpu(j) {
		cpu_freq = per_cpu(cpu_last_req_freq, j);

		if ((cluster == per_cpu(physical_cluster, j)) &&
				(max_freq < cpu_freq))
			max_freq = cpu_freq;
	}

	pr_debug("%s: cluster: %d, max freq: %d\n", __func__, cluster,
			max_freq);

	return max_freq;
}
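
/*
 * A physical cluster has one clock shared by every CPU placed on it, so it
 * must run at the highest rate any of those CPUs last requested: if two CPUs
 * on the A15 cluster asked for 900000 and 600000 kHz, the cluster clock
 * stays at 900000 kHz.
 */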

static unsigned int clk_get_cpu_rate(unsigned int cpu)
{
	u32 cur_cluster = per_cpu(physical_cluster, cpu);
	u32 rate = clk_get_rate(clk[cur_cluster]) / 1000;

	/* For switcher we use virtual A7 clock rates */
	if (is_bL_switching_enabled())
		rate = VIRT_FREQ(cur_cluster, rate);

	pr_debug("%s: cpu: %d, cluster: %d, freq: %u\n", __func__, cpu,
			cur_cluster, rate);

	return rate;
}

static unsigned int bL_cpufreq_get_rate(unsigned int cpu)
{
	if (is_bL_switching_enabled()) {
		pr_debug("%s: freq: %d\n", __func__, per_cpu(cpu_last_req_freq,
					cpu));

		return per_cpu(cpu_last_req_freq, cpu);
	} else {
		return clk_get_cpu_rate(cpu);
	}
}

static int bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster,
			       u32 rate)
{
	u32 new_rate, prev_rate;
	int ret;
	bool bLs = is_bL_switching_enabled();

	mutex_lock(&cluster_lock[new_cluster]);

	if (bLs) {
		prev_rate = per_cpu(cpu_last_req_freq, cpu);
		per_cpu(cpu_last_req_freq, cpu) = rate;
		per_cpu(physical_cluster, cpu) = new_cluster;

		new_rate = find_cluster_maxfreq(new_cluster);
		new_rate = ACTUAL_FREQ(new_cluster, new_rate);
	} else {
		new_rate = rate;
	}

	pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d, freq: %d\n",
			__func__, cpu, old_cluster, new_cluster, new_rate);

	ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
	if (WARN_ON(ret)) {
		pr_err("clk_set_rate failed: %d, new cluster: %d\n", ret,
				new_cluster);
		if (bLs) {
			per_cpu(cpu_last_req_freq, cpu) = prev_rate;
			per_cpu(physical_cluster, cpu) = old_cluster;
		}

		mutex_unlock(&cluster_lock[new_cluster]);

		return ret;
	}

	mutex_unlock(&cluster_lock[new_cluster]);

	/* Recalc freq for old cluster when switching clusters */
	if (old_cluster != new_cluster) {
		pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d\n",
				__func__, cpu, old_cluster, new_cluster);

		/* Switch cluster */
		bL_switch_request(cpu, new_cluster);

		mutex_lock(&cluster_lock[old_cluster]);

		/* Set freq of old cluster if there are cpus left on it */
		new_rate = find_cluster_maxfreq(old_cluster);
		new_rate = ACTUAL_FREQ(old_cluster, new_rate);

		if (new_rate) {
			pr_debug("%s: Updating rate of old cluster: %d, to freq: %d\n",
					__func__, old_cluster, new_rate);

			ret = clk_set_rate(clk[old_cluster], new_rate * 1000);
			if (ret)
				pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n",
						__func__, ret, old_cluster);
		}
		mutex_unlock(&cluster_lock[old_cluster]);
	}

	return 0;
}
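
/*
 * Summary of the sequence above: record the CPU's request, raise the target
 * cluster's clock to the maximum requested by any CPU currently on it, then,
 * if the request requires a different cluster, ask the switcher to migrate
 * the CPU and re-evaluate the old cluster, which may now run slower or may
 * have no CPUs left on it at all.
 */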

/* Set clock frequency */
static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
		unsigned int index)
{
	u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
	unsigned int freqs_new;

	cur_cluster = cpu_to_cluster(cpu);
	new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);

	freqs_new = freq_table[cur_cluster][index].frequency;

	if (is_bL_switching_enabled()) {
		if ((actual_cluster == A15_CLUSTER) &&
				(freqs_new < clk_big_min)) {
			new_cluster = A7_CLUSTER;
		} else if ((actual_cluster == A7_CLUSTER) &&
				(freqs_new > clk_little_max)) {
			new_cluster = A15_CLUSTER;
		}
	}

	return bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs_new);
}
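
/*
 * Cluster choice is driven by two thresholds in the virtual frequency
 * domain: a CPU on the A15 asking for less than clk_big_min (the big
 * cluster's lowest rate) is moved to the A7, and a CPU on the A7 asking for
 * more than clk_little_max (the LITTLE cluster's highest virtual rate) is
 * moved to the A15. Anything in between is served on the cluster the CPU
 * already occupies.
 */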

static inline u32 get_table_count(struct cpufreq_frequency_table *table)
{
	int count;

	for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++)
		;

	return count;
}

/* get the minimum frequency in the cpufreq_frequency_table */
static inline u32 get_table_min(struct cpufreq_frequency_table *table)
{
	struct cpufreq_frequency_table *pos;
	uint32_t min_freq = ~0;

	cpufreq_for_each_entry(pos, table)
		if (pos->frequency < min_freq)
			min_freq = pos->frequency;

	return min_freq;
}

/* get the maximum frequency in the cpufreq_frequency_table */
static inline u32 get_table_max(struct cpufreq_frequency_table *table)
{
	struct cpufreq_frequency_table *pos;
	uint32_t max_freq = 0;

	cpufreq_for_each_entry(pos, table)
		if (pos->frequency > max_freq)
			max_freq = pos->frequency;

	return max_freq;
}

static int merge_cluster_tables(void)
{
	int i, j, k = 0, count = 1;
	struct cpufreq_frequency_table *table;

	for (i = 0; i < MAX_CLUSTERS; i++)
		count += get_table_count(freq_table[i]);

	table = kzalloc(sizeof(*table) * count, GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	freq_table[MAX_CLUSTERS] = table;

	/* Add in reverse order to get freqs in increasing order */
	for (i = MAX_CLUSTERS - 1; i >= 0; i--) {
		for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END;
				j++) {
			table[k].frequency = VIRT_FREQ(i,
					freq_table[i][j].frequency);
			pr_debug("%s: index: %d, freq: %d\n", __func__, k,
					table[k].frequency);
			k++;
		}
	}

	table[k].driver_data = k;
	table[k].frequency = CPUFREQ_TABLE_END;

	pr_debug("%s: End, table: %p, count: %d\n", __func__, table, k);

	return 0;
}
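
/*
 * Worked example with hypothetical tables: A7 OPPs of {600000, 800000,
 * 1000000} kHz and A15 OPPs of {700000, 900000, 1200000} kHz merge into
 * {300000, 400000, 500000, 700000, 900000, 1200000}. The A7 entries are
 * halved by VIRT_FREQ and emitted first (clusters are walked in reverse
 * order), so the result is increasing as long as the halved A7 range lies
 * below the A15 range.
 */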

static void _put_cluster_clk_and_freq_table(struct device *cpu_dev)
{
	u32 cluster = raw_cpu_to_cluster(cpu_dev->id);

	if (!freq_table[cluster])
		return;

	clk_put(clk[cluster]);
	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
	if (arm_bL_ops->free_opp_table)
		arm_bL_ops->free_opp_table(cpu_dev);
	dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
}

static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
{
	u32 cluster = cpu_to_cluster(cpu_dev->id);
	int i;

	if (atomic_dec_return(&cluster_usage[cluster]))
		return;

	if (cluster < MAX_CLUSTERS)
		return _put_cluster_clk_and_freq_table(cpu_dev);

	for_each_present_cpu(i) {
		struct device *cdev = get_cpu_device(i);
		if (!cdev) {
			pr_err("%s: failed to get cpu%d device\n", __func__, i);
			return;
		}

		_put_cluster_clk_and_freq_table(cdev);
	}

	/* free virtual table */
	kfree(freq_table[cluster]);
}

static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
{
	u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
	char name[14] = "cpu-cluster.";
	int ret;

	if (freq_table[cluster])
		return 0;

	ret = arm_bL_ops->init_opp_table(cpu_dev);
	if (ret) {
		dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n",
				__func__, cpu_dev->id, ret);
		goto out;
	}

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
	if (ret) {
		dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n",
				__func__, cpu_dev->id, ret);
		goto free_opp_table;
	}

	name[12] = cluster + '0';
	clk[cluster] = clk_get(cpu_dev, name);
	if (!IS_ERR(clk[cluster])) {
		dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n",
				__func__, clk[cluster], freq_table[cluster],
				cluster);
		return 0;
	}

	dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
			__func__, cpu_dev->id, cluster);
	ret = PTR_ERR(clk[cluster]);
	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);

free_opp_table:
	if (arm_bL_ops->free_opp_table)
		arm_bL_ops->free_opp_table(cpu_dev);
out:
	dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
			cluster);
	return ret;
}
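
/*
 * The clock lookup name is built in place: "cpu-cluster." is twelve
 * characters, so writing the cluster digit at name[12] yields
 * "cpu-cluster.0" or "cpu-cluster.1", with name[13] left as the NUL
 * terminator from the zero-filled initializer.
 */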

static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
{
	u32 cluster = cpu_to_cluster(cpu_dev->id);
	int i, ret;

	if (atomic_inc_return(&cluster_usage[cluster]) != 1)
		return 0;

	if (cluster < MAX_CLUSTERS) {
		ret = _get_cluster_clk_and_freq_table(cpu_dev);
		if (ret)
			atomic_dec(&cluster_usage[cluster]);
		return ret;
	}

	/*
	 * Get data for all clusters and fill virtual cluster with a merge of
	 * both
	 */
	for_each_present_cpu(i) {
		struct device *cdev = get_cpu_device(i);
		if (!cdev) {
			pr_err("%s: failed to get cpu%d device\n", __func__, i);
			return -ENODEV;
		}

		ret = _get_cluster_clk_and_freq_table(cdev);
		if (ret)
			goto put_clusters;
	}

	ret = merge_cluster_tables();
	if (ret)
		goto put_clusters;

	/* Assuming 2 clusters, set clk_big_min and clk_little_max */
	clk_big_min = get_table_min(freq_table[0]);
	clk_little_max = VIRT_FREQ(1, get_table_max(freq_table[1]));

	pr_debug("%s: cluster: %d, clk_big_min: %d, clk_little_max: %d\n",
			__func__, cluster, clk_big_min, clk_little_max);

	return 0;

put_clusters:
	for_each_present_cpu(i) {
		struct device *cdev = get_cpu_device(i);
		if (!cdev) {
			pr_err("%s: failed to get cpu%d device\n", __func__, i);
			return -ENODEV;
		}

		_put_cluster_clk_and_freq_table(cdev);
	}

	atomic_dec(&cluster_usage[cluster]);

	return ret;
}
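
/*
 * cluster_usage is a per-cluster refcount: only the first CPU to arrive
 * builds the OPP and frequency tables and takes the clock (for the virtual
 * cluster, for both physical clusters plus the merged table), and only the
 * last CPU to leave tears everything down again in
 * put_cluster_clk_and_freq_table().
 */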

/* Per-CPU initialization */
static int bL_cpufreq_init(struct cpufreq_policy *policy)
{
	u32 cur_cluster = cpu_to_cluster(policy->cpu);
	struct device *cpu_dev;
	int ret;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("%s: failed to get cpu%d device\n", __func__,
				policy->cpu);
		return -ENODEV;
	}

	ret = get_cluster_clk_and_freq_table(cpu_dev);
	if (ret)
		return ret;

	ret = cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]);
	if (ret) {
		dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n",
				policy->cpu, cur_cluster);
		put_cluster_clk_and_freq_table(cpu_dev);
		return ret;
	}

	if (cur_cluster < MAX_CLUSTERS) {
		int cpu;

		cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));

		for_each_cpu(cpu, policy->cpus)
			per_cpu(physical_cluster, cpu) = cur_cluster;
	} else {
		/* Assumption: during init, we are always running on A15 */
		per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
	}

	if (arm_bL_ops->get_transition_latency)
		policy->cpuinfo.transition_latency =
			arm_bL_ops->get_transition_latency(cpu_dev);
	else
		policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;

	if (is_bL_switching_enabled())
		per_cpu(cpu_last_req_freq, policy->cpu) =
			clk_get_cpu_rate(policy->cpu);

	dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
	return 0;
}
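
/*
 * In per-cluster mode the policy is shared: policy->cpus is widened to all
 * CPUs in the cluster because they share one clock. In switcher mode each
 * logical CPU keeps its own policy and, per the assumption noted above, is
 * taken to start out on the A15 cluster.
 */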

static int bL_cpufreq_exit(struct cpufreq_policy *policy)
{
	struct device *cpu_dev;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("%s: failed to get cpu%d device\n", __func__,
				policy->cpu);
		return -ENODEV;
	}

	put_cluster_clk_and_freq_table(cpu_dev);
	dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);

	return 0;
}

static struct cpufreq_driver bL_cpufreq_driver = {
	.name			= "arm-big-little",
	.flags			= CPUFREQ_STICKY |
					CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
					CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.verify			= cpufreq_generic_frequency_table_verify,
	.target_index		= bL_cpufreq_set_target,
	.get			= bL_cpufreq_get_rate,
	.init			= bL_cpufreq_init,
	.exit			= bL_cpufreq_exit,
	.attr			= cpufreq_generic_attr,
};

static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb,
					unsigned long action, void *_arg)
{
	pr_debug("%s: action: %ld\n", __func__, action);

	switch (action) {
	case BL_NOTIFY_PRE_ENABLE:
	case BL_NOTIFY_PRE_DISABLE:
		cpufreq_unregister_driver(&bL_cpufreq_driver);
		break;

	case BL_NOTIFY_POST_ENABLE:
		set_switching_enabled(true);
		cpufreq_register_driver(&bL_cpufreq_driver);
		break;

	case BL_NOTIFY_POST_DISABLE:
		set_switching_enabled(false);
		cpufreq_register_driver(&bL_cpufreq_driver);
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static struct notifier_block bL_switcher_notifier = {
	.notifier_call = bL_cpufreq_switcher_notifier,
};
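
/*
 * Enabling or disabling the switcher changes the driver's entire view of
 * the system (per-cluster policies vs. one merged virtual table), so the
 * notifier simply unregisters the cpufreq driver before the transition and
 * re-registers it afterwards, letting every policy be rebuilt through
 * bL_cpufreq_init().
 */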

int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
{
	int ret, i;

	if (arm_bL_ops) {
		pr_debug("%s: Already registered: %s, exiting\n", __func__,
				arm_bL_ops->name);
		return -EBUSY;
	}

	if (!ops || !strlen(ops->name) || !ops->init_opp_table) {
		pr_err("%s: Invalid arm_bL_ops, exiting\n", __func__);
		return -ENODEV;
	}

	arm_bL_ops = ops;

	ret = bL_switcher_get_enabled();
	set_switching_enabled(ret);

	for (i = 0; i < MAX_CLUSTERS; i++)
		mutex_init(&cluster_lock[i]);

	ret = cpufreq_register_driver(&bL_cpufreq_driver);
	if (ret) {
		pr_info("%s: Failed registering platform driver: %s, err: %d\n",
				__func__, ops->name, ret);
		arm_bL_ops = NULL;
	} else {
		ret = bL_switcher_register_notifier(&bL_switcher_notifier);
		if (ret) {
			cpufreq_unregister_driver(&bL_cpufreq_driver);
			arm_bL_ops = NULL;
		} else {
			pr_info("%s: Registered platform driver: %s\n",
					__func__, ops->name);
		}
	}

	bL_switcher_put_enabled();
	return ret;
}
EXPORT_SYMBOL_GPL(bL_cpufreq_register);
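
/*
 * The bL_switcher_get_enabled()/bL_switcher_put_enabled() pair pins the
 * switcher in its current state, so it cannot be enabled or disabled
 * between sampling its state via set_switching_enabled() and completing
 * (de)registration.
 */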

void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops)
{
	if (arm_bL_ops != ops) {
		pr_err("%s: Registered with: %s, can't unregister, exiting\n",
				__func__, arm_bL_ops->name);
		return;
	}

	bL_switcher_get_enabled();
	bL_switcher_unregister_notifier(&bL_switcher_notifier);
	cpufreq_unregister_driver(&bL_cpufreq_driver);
	bL_switcher_put_enabled();
	pr_info("%s: Un-registered platform driver: %s\n", __func__,
			arm_bL_ops->name);
	arm_bL_ops = NULL;
}
EXPORT_SYMBOL_GPL(bL_cpufreq_unregister);

MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
MODULE_DESCRIPTION("Generic ARM big LITTLE cpufreq driver");
MODULE_LICENSE("GPL v2");