cpufreq: mt8173: check return value of regulator_get_voltage() call
[deliverable/linux.git] / drivers / cpufreq / mt8173-cpufreq.c
1 /*
2 * Copyright (c) 2015 Linaro Ltd.
3 * Author: Pi-Cheng Chen <pi-cheng.chen@linaro.org>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15 #include <linux/clk.h>
16 #include <linux/cpu.h>
17 #include <linux/cpu_cooling.h>
18 #include <linux/cpufreq.h>
19 #include <linux/cpumask.h>
20 #include <linux/of.h>
21 #include <linux/platform_device.h>
22 #include <linux/pm_opp.h>
23 #include <linux/regulator/consumer.h>
24 #include <linux/slab.h>
25 #include <linux/thermal.h>
26
27 #define MIN_VOLT_SHIFT (100000)
28 #define MAX_VOLT_SHIFT (200000)
29 #define MAX_VOLT_LIMIT (1150000)
30 #define VOLT_TOL (10000)
31
32 /*
33 * The struct mtk_cpu_dvfs_info holds necessary information for doing CPU DVFS
34 * on each CPU power/clock domain of Mediatek SoCs. Each CPU cluster in
35 * Mediatek SoCs has two voltage inputs, Vproc and Vsram. In some cases the two
36 * voltage inputs need to be controlled under a hardware limitation:
37 * 100mV < Vsram - Vproc < 200mV
38 *
39 * When scaling the clock frequency of a CPU clock domain, the clock source
40 * needs to be switched to another stable PLL clock temporarily until
41 * the original PLL becomes stable at target frequency.
42 */
struct mtk_cpu_dvfs_info {
	struct device *cpu_dev;			/* CPU device that owns the OPP table */
	struct regulator *proc_reg;		/* Vproc supply of the domain */
	struct regulator *sram_reg;		/* Vsram supply; NULL when absent */
	struct clk *cpu_clk;			/* CPU domain clock (reparented during DVFS) */
	struct clk *inter_clk;			/* stable intermediate parent clock */
	struct thermal_cooling_device *cdev;	/* cpufreq cooling device; may be NULL */
	int intermediate_voltage;		/* Vproc (uV) safe at inter_clk's rate */
	bool need_voltage_tracking;		/* true iff sram_reg is present */
};
53
/*
 * Step Vproc and Vsram toward @new_vproc while keeping the hardware
 * constraint 100mV < Vsram - Vproc < 200mV satisfied at every intermediate
 * point (see the comment above struct mtk_cpu_dvfs_info).
 *
 * Returns 0 on success or a negative error code from the regulator API.
 * When a step fails, the regulator already moved in that iteration is
 * rolled back to its previous voltage before returning.
 */
static int mtk_cpufreq_voltage_tracking(struct mtk_cpu_dvfs_info *info,
					int new_vproc)
{
	struct regulator *proc_reg = info->proc_reg;
	struct regulator *sram_reg = info->sram_reg;
	int old_vproc, old_vsram, new_vsram, vsram, vproc, ret;

	old_vproc = regulator_get_voltage(proc_reg);
	if (old_vproc < 0) {
		pr_err("%s: invalid Vproc value: %d\n", __func__, old_vproc);
		return old_vproc;
	}
	/* Vsram should not exceed the maximum allowed voltage of SoC. */
	new_vsram = min(new_vproc + MIN_VOLT_SHIFT, MAX_VOLT_LIMIT);

	if (old_vproc < new_vproc) {
		/*
		 * When scaling up voltages, Vsram and Vproc scale up step
		 * by step. At each step, set Vsram to (Vproc + 200mV) first,
		 * then set Vproc to (Vsram - 100mV).
		 * Keep doing it until Vsram and Vproc hit target voltages.
		 */
		do {
			/* Re-read both rails each step; they move together. */
			old_vsram = regulator_get_voltage(sram_reg);
			if (old_vsram < 0) {
				pr_err("%s: invalid Vsram value: %d\n",
				       __func__, old_vsram);
				return old_vsram;
			}
			old_vproc = regulator_get_voltage(proc_reg);
			if (old_vproc < 0) {
				pr_err("%s: invalid Vproc value: %d\n",
				       __func__, old_vproc);
				return old_vproc;
			}

			vsram = min(new_vsram, old_vproc + MAX_VOLT_SHIFT);

			if (vsram + VOLT_TOL >= MAX_VOLT_LIMIT) {
				vsram = MAX_VOLT_LIMIT;

				/*
				 * If the target Vsram hits the maximum voltage,
				 * try to set the exact voltage value first.
				 */
				ret = regulator_set_voltage(sram_reg, vsram,
							    vsram);
				if (ret)
					ret = regulator_set_voltage(sram_reg,
							vsram - VOLT_TOL,
							vsram);

				vproc = new_vproc;
			} else {
				ret = regulator_set_voltage(sram_reg, vsram,
							    vsram + VOLT_TOL);

				vproc = vsram - MIN_VOLT_SHIFT;
			}
			if (ret)
				return ret;

			ret = regulator_set_voltage(proc_reg, vproc,
						    vproc + VOLT_TOL);
			if (ret) {
				/* Undo this step's Vsram move before bailing. */
				regulator_set_voltage(sram_reg, old_vsram,
						      old_vsram);
				return ret;
			}
		} while (vproc < new_vproc || vsram < new_vsram);
	} else if (old_vproc > new_vproc) {
		/*
		 * When scaling down voltages, Vsram and Vproc scale down step
		 * by step. At each step, set Vproc to (Vsram - 200mV) first,
		 * then set Vsram to (Vproc + 100mV).
		 * Keep doing it until Vsram and Vproc hit target voltages.
		 */
		do {
			old_vproc = regulator_get_voltage(proc_reg);
			if (old_vproc < 0) {
				pr_err("%s: invalid Vproc value: %d\n",
				       __func__, old_vproc);
				return old_vproc;
			}
			old_vsram = regulator_get_voltage(sram_reg);
			if (old_vsram < 0) {
				pr_err("%s: invalid Vsram value: %d\n",
				       __func__, old_vsram);
				return old_vsram;
			}

			vproc = max(new_vproc, old_vsram - MAX_VOLT_SHIFT);
			ret = regulator_set_voltage(proc_reg, vproc,
						    vproc + VOLT_TOL);
			if (ret)
				return ret;

			if (vproc == new_vproc)
				vsram = new_vsram;
			else
				vsram = max(new_vsram, vproc + MIN_VOLT_SHIFT);

			if (vsram + VOLT_TOL >= MAX_VOLT_LIMIT) {
				vsram = MAX_VOLT_LIMIT;

				/*
				 * If the target Vsram hits the maximum voltage,
				 * try to set the exact voltage value first.
				 */
				ret = regulator_set_voltage(sram_reg, vsram,
							    vsram);
				if (ret)
					ret = regulator_set_voltage(sram_reg,
							vsram - VOLT_TOL,
							vsram);
			} else {
				ret = regulator_set_voltage(sram_reg, vsram,
							    vsram + VOLT_TOL);
			}

			if (ret) {
				/* Undo this step's Vproc move before bailing. */
				regulator_set_voltage(proc_reg, old_vproc,
						      old_vproc);
				return ret;
			}
			/*
			 * Tolerance in the exit test avoids looping forever
			 * when the regulator settles slightly above target.
			 */
		} while (vproc > new_vproc + VOLT_TOL ||
			 vsram > new_vsram + VOLT_TOL);
	}

	return 0;
}
185
186 static int mtk_cpufreq_set_voltage(struct mtk_cpu_dvfs_info *info, int vproc)
187 {
188 if (info->need_voltage_tracking)
189 return mtk_cpufreq_voltage_tracking(info, vproc);
190 else
191 return regulator_set_voltage(info->proc_reg, vproc,
192 vproc + VOLT_TOL);
193 }
194
/*
 * cpufreq ->target_index callback.
 *
 * Transition sequence: (1) raise Vproc first if the target or intermediate
 * OPP needs more voltage than now, (2) reparent the CPU clock to the stable
 * intermediate PLL, (3) retune the original PLL (armpll) to the target rate,
 * (4) switch the CPU clock back to armpll, (5) finally lower Vproc if the
 * target OPP allows it.  Each failure path undoes the steps already taken.
 *
 * Returns 0 on success or a negative error code.
 */
static int mtk_cpufreq_set_target(struct cpufreq_policy *policy,
				  unsigned int index)
{
	struct cpufreq_frequency_table *freq_table = policy->freq_table;
	struct clk *cpu_clk = policy->clk;
	struct clk *armpll = clk_get_parent(cpu_clk);
	struct mtk_cpu_dvfs_info *info = policy->driver_data;
	struct device *cpu_dev = info->cpu_dev;
	struct dev_pm_opp *opp;
	long freq_hz, old_freq_hz;
	int vproc, old_vproc, inter_vproc, target_vproc, ret;

	inter_vproc = info->intermediate_voltage;

	old_freq_hz = clk_get_rate(cpu_clk);
	old_vproc = regulator_get_voltage(info->proc_reg);
	if (old_vproc < 0) {
		pr_err("%s: invalid Vproc value: %d\n", __func__, old_vproc);
		return old_vproc;
	}

	/* Table frequencies are in kHz; OPP lookups use Hz. */
	freq_hz = freq_table[index].frequency * 1000;

	/* The OPP API of this kernel requires RCU read-side protection. */
	rcu_read_lock();
	opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		pr_err("cpu%d: failed to find OPP for %ld\n",
		       policy->cpu, freq_hz);
		return PTR_ERR(opp);
	}
	vproc = dev_pm_opp_get_voltage(opp);
	rcu_read_unlock();

	/*
	 * If the new voltage or the intermediate voltage is higher than the
	 * current voltage, scale up voltage first.
	 */
	target_vproc = (inter_vproc > vproc) ? inter_vproc : vproc;
	if (old_vproc < target_vproc) {
		ret = mtk_cpufreq_set_voltage(info, target_vproc);
		if (ret) {
			pr_err("cpu%d: failed to scale up voltage!\n",
			       policy->cpu);
			/* Best-effort rollback to the previous voltage. */
			mtk_cpufreq_set_voltage(info, old_vproc);
			return ret;
		}
	}

	/* Reparent the CPU clock to intermediate clock. */
	ret = clk_set_parent(cpu_clk, info->inter_clk);
	if (ret) {
		pr_err("cpu%d: failed to re-parent cpu clock!\n",
		       policy->cpu);
		mtk_cpufreq_set_voltage(info, old_vproc);
		WARN_ON(1);
		return ret;
	}

	/* Set the original PLL to target rate. */
	ret = clk_set_rate(armpll, freq_hz);
	if (ret) {
		pr_err("cpu%d: failed to scale cpu clock rate!\n",
		       policy->cpu);
		/* Switch back to armpll (still at the old rate), then revolt. */
		clk_set_parent(cpu_clk, armpll);
		mtk_cpufreq_set_voltage(info, old_vproc);
		return ret;
	}

	/* Set parent of CPU clock back to the original PLL. */
	ret = clk_set_parent(cpu_clk, armpll);
	if (ret) {
		pr_err("cpu%d: failed to re-parent cpu clock!\n",
		       policy->cpu);
		/* Stuck on inter_clk: keep the voltage safe for it. */
		mtk_cpufreq_set_voltage(info, inter_vproc);
		WARN_ON(1);
		return ret;
	}

	/*
	 * If the new voltage is lower than the intermediate voltage or the
	 * original voltage, scale down to the new voltage.
	 */
	if (vproc < inter_vproc || vproc < old_vproc) {
		ret = mtk_cpufreq_set_voltage(info, vproc);
		if (ret) {
			pr_err("cpu%d: failed to scale down voltage!\n",
			       policy->cpu);
			/* Undo the whole clock transition. */
			clk_set_parent(cpu_clk, info->inter_clk);
			clk_set_rate(armpll, old_freq_hz);
			clk_set_parent(cpu_clk, armpll);
			return ret;
		}
	}

	return 0;
}
292
293 static void mtk_cpufreq_ready(struct cpufreq_policy *policy)
294 {
295 struct mtk_cpu_dvfs_info *info = policy->driver_data;
296 struct device_node *np = of_node_get(info->cpu_dev->of_node);
297
298 if (WARN_ON(!np))
299 return;
300
301 if (of_find_property(np, "#cooling-cells", NULL)) {
302 info->cdev = of_cpufreq_cooling_register(np,
303 policy->related_cpus);
304
305 if (IS_ERR(info->cdev)) {
306 dev_err(info->cpu_dev,
307 "running cpufreq without cooling device: %ld\n",
308 PTR_ERR(info->cdev));
309
310 info->cdev = NULL;
311 }
312 }
313
314 of_node_put(np);
315 }
316
/*
 * Acquire the clocks, regulators and OPP table of the DVFS domain that
 * contains @cpu, and pre-compute the Vproc needed while running from the
 * intermediate clock.
 *
 * Returns 0 on success; -EPROBE_DEFER is propagated from clk/regulator
 * lookups so probing can be retried later.  On failure, every resource
 * acquired so far is released again.
 */
static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
{
	struct device *cpu_dev;
	/* Seed with ERR_PTR so the shared cleanup path can tell what we got. */
	struct regulator *proc_reg = ERR_PTR(-ENODEV);
	struct regulator *sram_reg = ERR_PTR(-ENODEV);
	struct clk *cpu_clk = ERR_PTR(-ENODEV);
	struct clk *inter_clk = ERR_PTR(-ENODEV);
	struct dev_pm_opp *opp;
	unsigned long rate;
	int ret;

	cpu_dev = get_cpu_device(cpu);
	if (!cpu_dev) {
		pr_err("failed to get cpu%d device\n", cpu);
		return -ENODEV;
	}

	cpu_clk = clk_get(cpu_dev, "cpu");
	if (IS_ERR(cpu_clk)) {
		if (PTR_ERR(cpu_clk) == -EPROBE_DEFER)
			pr_warn("cpu clk for cpu%d not ready, retry.\n", cpu);
		else
			pr_err("failed to get cpu clk for cpu%d\n", cpu);

		/* Nothing acquired yet, so return directly. */
		ret = PTR_ERR(cpu_clk);
		return ret;
	}

	inter_clk = clk_get(cpu_dev, "intermediate");
	if (IS_ERR(inter_clk)) {
		if (PTR_ERR(inter_clk) == -EPROBE_DEFER)
			pr_warn("intermediate clk for cpu%d not ready, retry.\n",
				cpu);
		else
			pr_err("failed to get intermediate clk for cpu%d\n",
			       cpu);

		ret = PTR_ERR(inter_clk);
		goto out_free_resources;
	}

	proc_reg = regulator_get_exclusive(cpu_dev, "proc");
	if (IS_ERR(proc_reg)) {
		if (PTR_ERR(proc_reg) == -EPROBE_DEFER)
			pr_warn("proc regulator for cpu%d not ready, retry.\n",
				cpu);
		else
			pr_err("failed to get proc regulator for cpu%d\n",
			       cpu);

		ret = PTR_ERR(proc_reg);
		goto out_free_resources;
	}

	/* Both presence and absence of sram regulator are valid cases. */
	sram_reg = regulator_get_exclusive(cpu_dev, "sram");

	ret = dev_pm_opp_of_add_table(cpu_dev);
	if (ret) {
		pr_warn("no OPP table for cpu%d\n", cpu);
		goto out_free_resources;
	}

	/* Search a safe voltage for intermediate frequency. */
	rate = clk_get_rate(inter_clk);
	/* The OPP API of this kernel requires RCU read-side protection. */
	rcu_read_lock();
	opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		pr_err("failed to get intermediate opp for cpu%d\n", cpu);
		ret = PTR_ERR(opp);
		goto out_free_opp_table;
	}
	info->intermediate_voltage = dev_pm_opp_get_voltage(opp);
	rcu_read_unlock();

	info->cpu_dev = cpu_dev;
	info->proc_reg = proc_reg;
	/* Normalize an absent sram supply to NULL for later checks. */
	info->sram_reg = IS_ERR(sram_reg) ? NULL : sram_reg;
	info->cpu_clk = cpu_clk;
	info->inter_clk = inter_clk;

	/*
	 * If SRAM regulator is present, software "voltage tracking" is needed
	 * for this CPU power domain.
	 */
	info->need_voltage_tracking = !IS_ERR(sram_reg);

	return 0;

out_free_opp_table:
	dev_pm_opp_of_remove_table(cpu_dev);

out_free_resources:
	/* Release only the handles that were actually obtained. */
	if (!IS_ERR(proc_reg))
		regulator_put(proc_reg);
	if (!IS_ERR(sram_reg))
		regulator_put(sram_reg);
	if (!IS_ERR(cpu_clk))
		clk_put(cpu_clk);
	if (!IS_ERR(inter_clk))
		clk_put(inter_clk);

	return ret;
}
422
/* Release the regulators, clocks and OPP table held by @info. */
static void mtk_cpu_dvfs_info_release(struct mtk_cpu_dvfs_info *info)
{
	if (!IS_ERR(info->proc_reg))
		regulator_put(info->proc_reg);
	/*
	 * NOTE(review): sram_reg is NULL (not an ERR_PTR) when the domain
	 * has no SRAM supply, so regulator_put(NULL) can be reached here —
	 * assumed to be a no-op; confirm against the regulator core.
	 */
	if (!IS_ERR(info->sram_reg))
		regulator_put(info->sram_reg);
	if (!IS_ERR(info->cpu_clk))
		clk_put(info->cpu_clk);
	if (!IS_ERR(info->inter_clk))
		clk_put(info->inter_clk);

	dev_pm_opp_of_remove_table(info->cpu_dev);
}
436
/*
 * cpufreq ->init callback: allocate per-cluster DVFS state, build the
 * frequency table from the OPP table, and extend the policy to all CPUs
 * of the cluster, which share one clock and power domain.
 *
 * Returns 0 on success or a negative error code; all intermediate
 * allocations are unwound on failure.
 */
static int mtk_cpufreq_init(struct cpufreq_policy *policy)
{
	struct mtk_cpu_dvfs_info *info;
	struct cpufreq_frequency_table *freq_table;
	int ret;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	ret = mtk_cpu_dvfs_info_init(info, policy->cpu);
	if (ret) {
		pr_err("%s failed to initialize dvfs info for cpu%d\n",
		       __func__, policy->cpu);
		goto out_free_dvfs_info;
	}

	ret = dev_pm_opp_init_cpufreq_table(info->cpu_dev, &freq_table);
	if (ret) {
		pr_err("failed to init cpufreq table for cpu%d: %d\n",
		       policy->cpu, ret);
		goto out_release_dvfs_info;
	}

	ret = cpufreq_table_validate_and_show(policy, freq_table);
	if (ret) {
		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
		goto out_free_cpufreq_table;
	}

	/* CPUs in the same cluster share a clock and power domain. */
	cpumask_copy(policy->cpus, &cpu_topology[policy->cpu].core_sibling);
	policy->driver_data = info;
	policy->clk = info->cpu_clk;

	return 0;

out_free_cpufreq_table:
	dev_pm_opp_free_cpufreq_table(info->cpu_dev, &freq_table);

out_release_dvfs_info:
	mtk_cpu_dvfs_info_release(info);

out_free_dvfs_info:
	kfree(info);

	return ret;
}
485
/*
 * cpufreq ->exit callback: tear down everything mtk_cpufreq_init() and
 * mtk_cpufreq_ready() set up for this policy.
 */
static int mtk_cpufreq_exit(struct cpufreq_policy *policy)
{
	struct mtk_cpu_dvfs_info *info = policy->driver_data;

	/*
	 * info->cdev may still be NULL if cooling registration failed or was
	 * never attempted — assumed tolerated by the unregister helper;
	 * confirm against cpu_cooling.c.
	 */
	cpufreq_cooling_unregister(info->cdev);
	dev_pm_opp_free_cpufreq_table(info->cpu_dev, &policy->freq_table);
	mtk_cpu_dvfs_info_release(info);
	kfree(info);

	return 0;
}
497
/* cpufreq driver operations for the MT8173 SoC. */
static struct cpufreq_driver mt8173_cpufreq_driver = {
	/* Per-policy governors: each CPU cluster is scaled independently. */
	.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
		 CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
	.verify = cpufreq_generic_frequency_table_verify,
	.target_index = mtk_cpufreq_set_target,
	.get = cpufreq_generic_get,
	.init = mtk_cpufreq_init,
	.exit = mtk_cpufreq_exit,
	.ready = mtk_cpufreq_ready,
	.name = "mtk-cpufreq",
	.attr = cpufreq_generic_attr,
};
510
511 static int mt8173_cpufreq_probe(struct platform_device *pdev)
512 {
513 int ret;
514
515 ret = cpufreq_register_driver(&mt8173_cpufreq_driver);
516 if (ret)
517 pr_err("failed to register mtk cpufreq driver\n");
518
519 return ret;
520 }
521
/* Matched by the platform device registered in mt8173_cpufreq_driver_init(). */
static struct platform_driver mt8173_cpufreq_platdrv = {
	.driver = {
		.name = "mt8173-cpufreq",
	},
	.probe = mt8173_cpufreq_probe,
};
528
529 static int mt8173_cpufreq_driver_init(void)
530 {
531 struct platform_device *pdev;
532 int err;
533
534 if (!of_machine_is_compatible("mediatek,mt8173"))
535 return -ENODEV;
536
537 err = platform_driver_register(&mt8173_cpufreq_platdrv);
538 if (err)
539 return err;
540
541 /*
542 * Since there's no place to hold device registration code and no
543 * device tree based way to match cpufreq driver yet, both the driver
544 * and the device registration codes are put here to handle defer
545 * probing.
546 */
547 pdev = platform_device_register_simple("mt8173-cpufreq", -1, NULL, 0);
548 if (IS_ERR(pdev)) {
549 pr_err("failed to register mtk-cpufreq platform device\n");
550 return PTR_ERR(pdev);
551 }
552
553 return 0;
554 }
555 device_initcall(mt8173_cpufreq_driver_init);
This page took 0.042956 seconds and 5 git commands to generate.