/*
 * linux/arch/arm/mach-omap1/clock.c
 *
 * Copyright (C) 2004 - 2005, 2009-2010 Nokia Corporation
 * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 * Modified to use omap shared clock framework by
 * Tony Lindgren <tony@atomide.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clkdev.h>

#include <asm/mach-types.h>

#include "soc.h"
#include <plat/usb.h>

#include <mach/hardware.h>

#include "../plat-omap/sram.h"

#include "iomap.h"
#include "clock.h"
#include "opp.h"

__u32 arm_idlect1_mask;
struct clk *api_ck_p, *ck_dpll1_p, *ck_ref_p;

static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
static DEFINE_SPINLOCK(clockfw_lock);

/*
 * Omap1 specific clock functions
 */

unsigned long omap1_uart_recalc(struct clk *clk)
{
        unsigned int val = __raw_readl(clk->enable_reg);
        return val & clk->enable_bit ? 48000000 : 12000000;
}

unsigned long omap1_sossi_recalc(struct clk *clk)
{
        u32 div = omap_readl(MOD_CONF_CTRL_1);

        div = (div >> 17) & 0x7;
        div++;

        return clk->parent->rate / div;
}

static void omap1_clk_allow_idle(struct clk *clk)
{
        struct arm_idlect1_clk *iclk = (struct arm_idlect1_clk *)clk;

        if (!(clk->flags & CLOCK_IDLE_CONTROL))
                return;

        if (iclk->no_idle_count > 0 && !(--iclk->no_idle_count))
                arm_idlect1_mask |= 1 << iclk->idlect_shift;
}

static void omap1_clk_deny_idle(struct clk *clk)
{
        struct arm_idlect1_clk *iclk = (struct arm_idlect1_clk *)clk;

        if (!(clk->flags & CLOCK_IDLE_CONTROL))
                return;

        if (iclk->no_idle_count++ == 0)
                arm_idlect1_mask &= ~(1 << iclk->idlect_shift);
}

static __u16 verify_ckctl_value(__u16 newval)
{
        /* This function checks for the following limitations set
         * by the hardware (all conditions must be true):
         *   DSPMMU_CK == DSP_CK or DSPMMU_CK == DSP_CK/2
         *   ARM_CK >= TC_CK
         *   DSP_CK >= TC_CK
         *   DSPMMU_CK >= TC_CK
         *
         * In addition, the following rules are enforced:
         *   LCD_CK <= TC_CK
         *   ARMPER_CK <= TC_CK
         *
         * However, maximum frequencies are not checked for!
         */
        __u8 per_exp;
        __u8 lcd_exp;
        __u8 arm_exp;
        __u8 dsp_exp;
        __u8 tc_exp;
        __u8 dspmmu_exp;

        per_exp = (newval >> CKCTL_PERDIV_OFFSET) & 3;
        lcd_exp = (newval >> CKCTL_LCDDIV_OFFSET) & 3;
        arm_exp = (newval >> CKCTL_ARMDIV_OFFSET) & 3;
        dsp_exp = (newval >> CKCTL_DSPDIV_OFFSET) & 3;
        tc_exp = (newval >> CKCTL_TCDIV_OFFSET) & 3;
        dspmmu_exp = (newval >> CKCTL_DSPMMUDIV_OFFSET) & 3;

        if (dspmmu_exp < dsp_exp)
                dspmmu_exp = dsp_exp;
        if (dspmmu_exp > dsp_exp + 1)
                dspmmu_exp = dsp_exp + 1;
        if (tc_exp < arm_exp)
                tc_exp = arm_exp;
        if (tc_exp < dspmmu_exp)
                tc_exp = dspmmu_exp;
        if (tc_exp > lcd_exp)
                lcd_exp = tc_exp;
        if (tc_exp > per_exp)
                per_exp = tc_exp;

        newval &= 0xf000;
        newval |= per_exp << CKCTL_PERDIV_OFFSET;
        newval |= lcd_exp << CKCTL_LCDDIV_OFFSET;
        newval |= arm_exp << CKCTL_ARMDIV_OFFSET;
        newval |= dsp_exp << CKCTL_DSPDIV_OFFSET;
        newval |= tc_exp << CKCTL_TCDIV_OFFSET;
        newval |= dspmmu_exp << CKCTL_DSPMMUDIV_OFFSET;

        return newval;
}
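
/*
 * Worked example of the clamping above (the field values are chosen here
 * purely for illustration): if the requested fields decode to
 * arm_exp = dsp_exp = dspmmu_exp = lcd_exp = per_exp = 0 and tc_exp = 2,
 * the checks leave TC alone but raise lcd_exp and per_exp to 2, so that
 * LCD_CK and ARMPER_CK never end up running faster than TC_CK.
 */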

static int calc_dsor_exp(struct clk *clk, unsigned long rate)
{
        /* Note: If the target frequency is too low, this function will
         * return 4, which is an invalid value. The caller must check for
         * this value and act accordingly.
         *
         * Note: This function does not check for the following limitations
         * set by the hardware (all conditions must be true):
         *   DSPMMU_CK == DSP_CK or DSPMMU_CK == DSP_CK/2
         *   ARM_CK >= TC_CK
         *   DSP_CK >= TC_CK
         *   DSPMMU_CK >= TC_CK
         */
        unsigned long realrate;
        struct clk *parent;
        unsigned dsor_exp;

        parent = clk->parent;
        if (unlikely(parent == NULL))
                return -EIO;

        realrate = parent->rate;
        for (dsor_exp = 0; dsor_exp < 4; dsor_exp++) {
                if (realrate <= rate)
                        break;

                realrate /= 2;
        }

        return dsor_exp;
}
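
/*
 * For example (rates picked for illustration only): with a 60 MHz parent
 * and a requested rate of 16 MHz, the loop above tries 60, 30 and 15 MHz
 * and stops at dsor_exp = 2, i.e. divide-by-4. Requesting anything below
 * parent_rate / 8 falls off the end of the loop and returns the invalid
 * value 4, which callers such as omap1_clk_set_rate_ckctl_arm() turn
 * into -EINVAL.
 */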

unsigned long omap1_ckctl_recalc(struct clk *clk)
{
        /* Calculate the divisor encoded as a 2-bit exponent */
        int dsor = 1 << (3 & (omap_readw(ARM_CKCTL) >> clk->rate_offset));

        return clk->parent->rate / dsor;
}
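
/*
 * A quick sanity check of the math above (values are illustrative only):
 * a 2-bit field value of 0b10 in ARM_CKCTL gives dsor = 1 << 2 = 4, so a
 * clock whose parent runs at 120 MHz recalculates to 30 MHz.
 */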

unsigned long omap1_ckctl_recalc_dsp_domain(struct clk *clk)
{
        int dsor;

        /* Calculate the divisor encoded as a 2-bit exponent.
         *
         * The clock control bits are in the DSP domain,
         * so api_ck is needed for access.
         * Note that DSP_CKCTL virt addr = phys addr, so
         * we must use __raw_readw() instead of omap_readw().
         */
        omap1_clk_enable(api_ck_p);
        dsor = 1 << (3 & (__raw_readw(DSP_CKCTL) >> clk->rate_offset));
        omap1_clk_disable(api_ck_p);

        return clk->parent->rate / dsor;
}

/* MPU virtual clock functions */
int omap1_select_table_rate(struct clk *clk, unsigned long rate)
{
        /* Find the highest supported frequency <= rate and switch to it */
        struct mpu_rate *ptr;
        unsigned long ref_rate;

        ref_rate = ck_ref_p->rate;

        for (ptr = omap1_rate_table; ptr->rate; ptr++) {
                if (!(ptr->flags & cpu_mask))
                        continue;

                if (ptr->xtal != ref_rate)
                        continue;

                /* Can only be checked after the xtal frequency check */
                if (ptr->rate <= rate)
                        break;
        }

        if (!ptr->rate)
                return -EINVAL;

        /*
         * In most cases we should not need to reprogram the DPLL.
         * Reprogramming the DPLL is tricky: it must be done from SRAM.
         */
        omap_sram_reprogram_clock(ptr->dpllctl_val, ptr->ckctl_val);

        /* XXX Do we need to recalculate the tree below DPLL1 at this point? */
        ck_dpll1_p->rate = ptr->pll_rate;

        return 0;
}
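
/*
 * For example (the concrete rates here are only illustrative, the real
 * entries live in omap1_rate_table): with a 12 MHz ck_ref and a request
 * for 120000000, the loop skips entries for other silicon variants or
 * other xtal rates and, assuming the table is ordered highest rate
 * first, picks the first remaining entry at or below 120 MHz; DPLL1 and
 * CKCTL are then reprogrammed from SRAM to match that entry.
 */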

int omap1_clk_set_rate_dsp_domain(struct clk *clk, unsigned long rate)
{
        int dsor_exp;
        u16 regval;

        dsor_exp = calc_dsor_exp(clk, rate);
        if (dsor_exp > 3)
                dsor_exp = -EINVAL;
        if (dsor_exp < 0)
                return dsor_exp;

        regval = __raw_readw(DSP_CKCTL);
        regval &= ~(3 << clk->rate_offset);
        regval |= dsor_exp << clk->rate_offset;
        __raw_writew(regval, DSP_CKCTL);
        clk->rate = clk->parent->rate / (1 << dsor_exp);

        return 0;
}

long omap1_clk_round_rate_ckctl_arm(struct clk *clk, unsigned long rate)
{
        int dsor_exp = calc_dsor_exp(clk, rate);
        if (dsor_exp < 0)
                return dsor_exp;
        if (dsor_exp > 3)
                dsor_exp = 3;
        return clk->parent->rate / (1 << dsor_exp);
}

int omap1_clk_set_rate_ckctl_arm(struct clk *clk, unsigned long rate)
{
        int dsor_exp;
        u16 regval;

        dsor_exp = calc_dsor_exp(clk, rate);
        if (dsor_exp > 3)
                dsor_exp = -EINVAL;
        if (dsor_exp < 0)
                return dsor_exp;

        regval = omap_readw(ARM_CKCTL);
        regval &= ~(3 << clk->rate_offset);
        regval |= dsor_exp << clk->rate_offset;
        regval = verify_ckctl_value(regval);
        omap_writew(regval, ARM_CKCTL);
        clk->rate = clk->parent->rate / (1 << dsor_exp);
        return 0;
}

long omap1_round_to_table_rate(struct clk *clk, unsigned long rate)
{
        /* Find the highest supported frequency <= rate */
        struct mpu_rate *ptr;
        long highest_rate;
        unsigned long ref_rate;

        ref_rate = ck_ref_p->rate;

        highest_rate = -EINVAL;

        for (ptr = omap1_rate_table; ptr->rate; ptr++) {
                if (!(ptr->flags & cpu_mask))
                        continue;

                if (ptr->xtal != ref_rate)
                        continue;

                highest_rate = ptr->rate;

                /* Can only be checked after the xtal frequency check */
                if (ptr->rate <= rate)
                        break;
        }

        return highest_rate;
}

static unsigned calc_ext_dsor(unsigned long rate)
{
        unsigned dsor;

        /* MCLK and BCLK divisor selection is not linear:
         *   freq = 96MHz / dsor
         *
         * RATIO_SEL range: dsor <-> RATIO_SEL
         *   0..6:  (RATIO_SEL+2)       <-> (dsor-2)
         *   6..48: (8+(RATIO_SEL-6)*2) <-> ((dsor-8)/2+6)
         * The minimum dsor is 2 and the maximum is 96. Odd divisors
         * starting from 9 cannot be used.
         */
        for (dsor = 2; dsor < 96; ++dsor) {
                if ((dsor & 1) && dsor > 8)
                        continue;
                if (rate >= 96000000 / dsor)
                        break;
        }
        return dsor;
}
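
/*
 * A worked example of the search above (rates are illustrative only):
 * asking for 12 MHz stops at dsor = 8 (96 MHz / 8 = 12 MHz exactly);
 * asking for 9 MHz skips dsor = 9..11 (odd values above 8 are not
 * selectable and 96 / 10 = 9.6 MHz is still too fast) and settles on
 * dsor = 12, i.e. 8 MHz, the closest achievable rate at or below the
 * request.
 */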

/* XXX Only needed on 1510 */
int omap1_set_uart_rate(struct clk *clk, unsigned long rate)
{
        unsigned int val;

        val = __raw_readl(clk->enable_reg);
        if (rate == 12000000)
                val &= ~(1 << clk->enable_bit);
        else if (rate == 48000000)
                val |= (1 << clk->enable_bit);
        else
                return -EINVAL;
        __raw_writel(val, clk->enable_reg);
        clk->rate = rate;

        return 0;
}

/* External clock (MCLK & BCLK) functions */
int omap1_set_ext_clk_rate(struct clk *clk, unsigned long rate)
{
        unsigned dsor;
        __u16 ratio_bits;

        dsor = calc_ext_dsor(rate);
        clk->rate = 96000000 / dsor;
        if (dsor > 8)
                ratio_bits = ((dsor - 8) / 2 + 6) << 2;
        else
                ratio_bits = (dsor - 2) << 2;

        ratio_bits |= __raw_readw(clk->enable_reg) & ~0xfd;
        __raw_writew(ratio_bits, clk->enable_reg);

        return 0;
}
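
/*
 * Encoding example (numbers for illustration only): dsor = 6 is written
 * as RATIO_SEL = 6 - 2 = 4, i.e. ratio_bits = 4 << 2 = 0x10, while
 * dsor = 12 falls in the second range and becomes (12 - 8) / 2 + 6 = 8,
 * i.e. ratio_bits = 0x20. The "& ~0xfd" masking above preserves bit 1
 * and the upper byte of the existing register value.
 */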

int omap1_set_sossi_rate(struct clk *clk, unsigned long rate)
{
        u32 l;
        int div;
        unsigned long p_rate;

        p_rate = clk->parent->rate;
        /* Round towards slower frequency */
        div = (p_rate + rate - 1) / rate;
        div--;
        if (div < 0 || div > 7)
                return -EINVAL;

        l = omap_readl(MOD_CONF_CTRL_1);
        l &= ~(7 << 17);
        l |= div << 17;
        omap_writel(l, MOD_CONF_CTRL_1);

        clk->rate = p_rate / (div + 1);

        return 0;
}
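
/*
 * For example (rates are illustrative only): with a 96 MHz parent and a
 * request for 13500000, the divisor rounds up to 8, the 3-bit register
 * field is programmed with 7, and the resulting SoSSI rate is
 * 96 MHz / 8 = 12 MHz, i.e. the request is rounded down.
 */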

long omap1_round_ext_clk_rate(struct clk *clk, unsigned long rate)
{
        return 96000000 / calc_ext_dsor(rate);
}

void omap1_init_ext_clk(struct clk *clk)
{
        unsigned dsor;
        __u16 ratio_bits;

        /* Determine the current rate and ensure the clock is based on
         * the 96 MHz APLL */
        ratio_bits = __raw_readw(clk->enable_reg) & ~1;
        __raw_writew(ratio_bits, clk->enable_reg);

        ratio_bits = (ratio_bits & 0xfc) >> 2;
        if (ratio_bits > 6)
                dsor = (ratio_bits - 6) * 2 + 8;
        else
                dsor = ratio_bits + 2;

        clk->rate = 96000000 / dsor;
}
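
/*
 * Decode example (the register value is hypothetical): reading 0x21 from
 * the enable register first has bit 0 cleared (which, per the comment
 * above, keeps the clock based on the 96 MHz APLL) and is written back
 * as 0x20; the RATIO_SEL field is then (0x20 & 0xfc) >> 2 = 8, which
 * maps to dsor = (8 - 6) * 2 + 8 = 12 and an initial rate of 8 MHz.
 */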

int omap1_clk_enable(struct clk *clk)
{
        int ret = 0;

        if (clk->usecount++ == 0) {
                if (clk->parent) {
                        ret = omap1_clk_enable(clk->parent);
                        if (ret)
                                goto err;

                        if (clk->flags & CLOCK_NO_IDLE_PARENT)
                                omap1_clk_deny_idle(clk->parent);
                }

                ret = clk->ops->enable(clk);
                if (ret) {
                        if (clk->parent)
                                omap1_clk_disable(clk->parent);
                        goto err;
                }
        }
        return ret;

err:
        clk->usecount--;
        return ret;
}

void omap1_clk_disable(struct clk *clk)
{
        if (clk->usecount > 0 && !(--clk->usecount)) {
                clk->ops->disable(clk);
                if (likely(clk->parent)) {
                        omap1_clk_disable(clk->parent);
                        if (clk->flags & CLOCK_NO_IDLE_PARENT)
                                omap1_clk_allow_idle(clk->parent);
                }
        }
}

static int omap1_clk_enable_generic(struct clk *clk)
{
        __u16 regval16;
        __u32 regval32;

        if (unlikely(clk->enable_reg == NULL)) {
                printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
                       clk->name);
                return -EINVAL;
        }

        if (clk->flags & ENABLE_REG_32BIT) {
                regval32 = __raw_readl(clk->enable_reg);
                regval32 |= (1 << clk->enable_bit);
                __raw_writel(regval32, clk->enable_reg);
        } else {
                regval16 = __raw_readw(clk->enable_reg);
                regval16 |= (1 << clk->enable_bit);
                __raw_writew(regval16, clk->enable_reg);
        }

        return 0;
}

static void omap1_clk_disable_generic(struct clk *clk)
{
        __u16 regval16;
        __u32 regval32;

        if (clk->enable_reg == NULL)
                return;

        if (clk->flags & ENABLE_REG_32BIT) {
                regval32 = __raw_readl(clk->enable_reg);
                regval32 &= ~(1 << clk->enable_bit);
                __raw_writel(regval32, clk->enable_reg);
        } else {
                regval16 = __raw_readw(clk->enable_reg);
                regval16 &= ~(1 << clk->enable_bit);
                __raw_writew(regval16, clk->enable_reg);
        }
}

const struct clkops clkops_generic = {
        .enable         = omap1_clk_enable_generic,
        .disable        = omap1_clk_disable_generic,
};

static int omap1_clk_enable_dsp_domain(struct clk *clk)
{
        int retval;

        retval = omap1_clk_enable(api_ck_p);
        if (!retval) {
                retval = omap1_clk_enable_generic(clk);
                omap1_clk_disable(api_ck_p);
        }

        return retval;
}

static void omap1_clk_disable_dsp_domain(struct clk *clk)
{
        if (omap1_clk_enable(api_ck_p) == 0) {
                omap1_clk_disable_generic(clk);
                omap1_clk_disable(api_ck_p);
        }
}

const struct clkops clkops_dspck = {
        .enable         = omap1_clk_enable_dsp_domain,
        .disable        = omap1_clk_disable_dsp_domain,
};

/* XXX SYSC register handling does not belong in the clock framework */
static int omap1_clk_enable_uart_functional_16xx(struct clk *clk)
{
        int ret;
        struct uart_clk *uclk;

        ret = omap1_clk_enable_generic(clk);
        if (ret == 0) {
                /* Set smart idle acknowledgement mode */
                uclk = (struct uart_clk *)clk;
                omap_writeb((omap_readb(uclk->sysc_addr) & ~0x10) | 8,
                            uclk->sysc_addr);
        }

        return ret;
}
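
/*
 * In bit terms (the register field names are not spelled out here), the
 * write above clears bit 4 and sets bit 3 of the UART SYSC register,
 * which the original comment describes as "smart idle"; the disable path
 * below clears both bits for "force idle".
 */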

/* XXX SYSC register handling does not belong in the clock framework */
static void omap1_clk_disable_uart_functional_16xx(struct clk *clk)
{
        struct uart_clk *uclk;

        /* Set force idle acknowledgement mode */
        uclk = (struct uart_clk *)clk;
        omap_writeb((omap_readb(uclk->sysc_addr) & ~0x18), uclk->sysc_addr);

        omap1_clk_disable_generic(clk);
}

/* XXX SYSC register handling does not belong in the clock framework */
const struct clkops clkops_uart_16xx = {
        .enable         = omap1_clk_enable_uart_functional_16xx,
        .disable        = omap1_clk_disable_uart_functional_16xx,
};

long omap1_clk_round_rate(struct clk *clk, unsigned long rate)
{
        if (clk->round_rate != NULL)
                return clk->round_rate(clk, rate);

        return clk->rate;
}

int omap1_clk_set_rate(struct clk *clk, unsigned long rate)
{
        int ret = -EINVAL;

        if (clk->set_rate)
                ret = clk->set_rate(clk, rate);
        return ret;
}

/*
 * Omap1 clock reset and init functions
 */

#ifdef CONFIG_OMAP_RESET_CLOCKS

void omap1_clk_disable_unused(struct clk *clk)
{
        __u32 regval32;

        /* Clocks in the DSP domain need api_ck. Just assume the bootloader
         * has not enabled any DSP clocks */
        if (clk->enable_reg == DSP_IDLECT2) {
                pr_info("Skipping reset check for DSP domain clock \"%s\"\n",
                        clk->name);
                return;
        }

        /* Is the clock already disabled? */
        if (clk->flags & ENABLE_REG_32BIT)
                regval32 = __raw_readl(clk->enable_reg);
        else
                regval32 = __raw_readw(clk->enable_reg);

        if ((regval32 & (1 << clk->enable_bit)) == 0)
                return;

        printk(KERN_INFO "Disabling unused clock \"%s\"... ", clk->name);
        clk->ops->disable(clk);
        printk(" done\n");
}

#endif

int clk_enable(struct clk *clk)
{
        unsigned long flags;
        int ret;

        if (clk == NULL || IS_ERR(clk))
                return -EINVAL;

        spin_lock_irqsave(&clockfw_lock, flags);
        ret = omap1_clk_enable(clk);
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return ret;
}
EXPORT_SYMBOL(clk_enable);

void clk_disable(struct clk *clk)
{
        unsigned long flags;

        if (clk == NULL || IS_ERR(clk))
                return;

        spin_lock_irqsave(&clockfw_lock, flags);
        if (clk->usecount == 0) {
                pr_err("Trying to disable clock %s with 0 usecount\n",
                       clk->name);
                WARN_ON(1);
                goto out;
        }

        omap1_clk_disable(clk);

out:
        spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_disable);

unsigned long clk_get_rate(struct clk *clk)
{
        unsigned long flags;
        unsigned long ret;

        if (clk == NULL || IS_ERR(clk))
                return 0;

        spin_lock_irqsave(&clockfw_lock, flags);
        ret = clk->rate;
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return ret;
}
EXPORT_SYMBOL(clk_get_rate);

/*
 * Optional clock functions defined in include/linux/clk.h
 */

long clk_round_rate(struct clk *clk, unsigned long rate)
{
        unsigned long flags;
        long ret;

        if (clk == NULL || IS_ERR(clk))
                return 0;

        spin_lock_irqsave(&clockfw_lock, flags);
        ret = omap1_clk_round_rate(clk, rate);
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return ret;
}
EXPORT_SYMBOL(clk_round_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
        unsigned long flags;
        int ret = -EINVAL;

        if (clk == NULL || IS_ERR(clk))
                return ret;

        spin_lock_irqsave(&clockfw_lock, flags);
        ret = omap1_clk_set_rate(clk, rate);
        if (ret == 0)
                propagate_rate(clk);
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return ret;
}
EXPORT_SYMBOL(clk_set_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
        WARN_ONCE(1, "clk_set_parent() not implemented for OMAP1\n");

        return -EINVAL;
}
EXPORT_SYMBOL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
        return clk->parent;
}
EXPORT_SYMBOL(clk_get_parent);

/*
 * OMAP specific clock functions shared between omap1 and omap2
 */

int __initdata mpurate;

/*
 * By default we use the rate set by the bootloader.
 * You can override this with the mpurate= cmdline option.
 */
static int __init omap_clk_setup(char *str)
{
        get_option(&str, &mpurate);

        if (!mpurate)
                return 1;

        if (mpurate < 1000)
                mpurate *= 1000000;

        return 1;
}
__setup("mpurate=", omap_clk_setup);
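
/*
 * For example (values are illustrative only): booting with "mpurate=216"
 * is treated as 216 MHz and stored as 216000000, while
 * "mpurate=216000000" is already above the 1000 threshold and is kept
 * as-is.
 */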

/* Used for clocks that always have the same value as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
        return clk->parent->rate;
}

/*
 * Used for clocks that have the same value as the parent clock,
 * divided by some factor
 */
unsigned long omap_fixed_divisor_recalc(struct clk *clk)
{
        WARN_ON(!clk->fixed_div);

        return clk->parent->rate / clk->fixed_div;
}

void clk_reparent(struct clk *child, struct clk *parent)
{
        list_del_init(&child->sibling);
        if (parent)
                list_add(&child->sibling, &parent->children);
        child->parent = parent;

        /* now do the debugfs renaming to reattach the child
         * to the proper parent */
}

/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &tclk->children, sibling) {
                if (clkp->recalc)
                        clkp->rate = clkp->recalc(clkp);
                propagate_rate(clkp);
        }
}

static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent), which, if the
 * clock's .recalc is set correctly, should also propagate their rates.
 * Called at init.
 */
void recalculate_root_clocks(void)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &root_clks, sibling) {
                if (clkp->recalc)
                        clkp->rate = clkp->recalc(clkp);
                propagate_rate(clkp);
        }
}

/**
 * clk_preinit - initialize any fields in the struct clk before clk init
 * @clk: struct clk * to initialize
 *
 * Initialize any struct clk fields needed before normal clk initialization
 * can run. No return value.
 */
void clk_preinit(struct clk *clk)
{
        INIT_LIST_HEAD(&clk->children);
}

int clk_register(struct clk *clk)
{
        if (clk == NULL || IS_ERR(clk))
                return -EINVAL;

        /*
         * trap out already registered clocks
         */
        if (clk->node.next || clk->node.prev)
                return 0;

        mutex_lock(&clocks_mutex);
        if (clk->parent)
                list_add(&clk->sibling, &clk->parent->children);
        else
                list_add(&clk->sibling, &root_clks);

        list_add(&clk->node, &clocks);
        if (clk->init)
                clk->init(clk);
        mutex_unlock(&clocks_mutex);

        return 0;
}
EXPORT_SYMBOL(clk_register);

void clk_unregister(struct clk *clk)
{
        if (clk == NULL || IS_ERR(clk))
                return;

        mutex_lock(&clocks_mutex);
        list_del(&clk->sibling);
        list_del(&clk->node);
        mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clk_unregister);

void clk_enable_init_clocks(void)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &clocks, node)
                if (clkp->flags & ENABLE_ON_INIT)
                        clk_enable(clkp);
}

/**
 * omap_clk_get_by_name - locate OMAP struct clk by its name
 * @name: name of the struct clk to locate
 *
 * Locate an OMAP struct clk by its name. Assumes that struct clk
 * names are unique. Returns NULL if not found or a pointer to the
 * struct clk if found.
 */
struct clk *omap_clk_get_by_name(const char *name)
{
        struct clk *c;
        struct clk *ret = NULL;

        mutex_lock(&clocks_mutex);

        list_for_each_entry(c, &clocks, node) {
                if (!strcmp(c->name, name)) {
                        ret = c;
                        break;
                }
        }

        mutex_unlock(&clocks_mutex);

        return ret;
}

int omap_clk_enable_autoidle_all(void)
{
        struct clk *c;
        unsigned long flags;

        spin_lock_irqsave(&clockfw_lock, flags);

        list_for_each_entry(c, &clocks, node)
                if (c->ops->allow_idle)
                        c->ops->allow_idle(c);

        spin_unlock_irqrestore(&clockfw_lock, flags);

        return 0;
}

int omap_clk_disable_autoidle_all(void)
{
        struct clk *c;
        unsigned long flags;

        spin_lock_irqsave(&clockfw_lock, flags);

        list_for_each_entry(c, &clocks, node)
                if (c->ops->deny_idle)
                        c->ops->deny_idle(c);

        spin_unlock_irqrestore(&clockfw_lock, flags);

        return 0;
}

/*
 * Low level helpers
 */
static int clkll_enable_null(struct clk *clk)
{
        return 0;
}

static void clkll_disable_null(struct clk *clk)
{
}

const struct clkops clkops_null = {
        .enable         = clkll_enable_null,
        .disable        = clkll_disable_null,
};

/*
 * Dummy clock
 *
 * Used for clock aliases that are needed on some OMAPs, but not others
 */
struct clk dummy_ck = {
        .name   = "dummy",
        .ops    = &clkops_null,
};

#ifdef CONFIG_OMAP_RESET_CLOCKS
/*
 * Disable any unused clocks left on by the bootloader
 */
static int __init clk_disable_unused(void)
{
        struct clk *ck;
        unsigned long flags;

        pr_info("clock: disabling unused clocks to save power\n");

        spin_lock_irqsave(&clockfw_lock, flags);
        list_for_each_entry(ck, &clocks, node) {
                if (ck->ops == &clkops_null)
                        continue;

                if (ck->usecount > 0 || !ck->enable_reg)
                        continue;

                omap1_clk_disable_unused(ck);
        }
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return 0;
}
late_initcall(clk_disable_unused);
late_initcall(omap_clk_enable_autoidle_all);
#endif

#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
/*
 * debugfs support to trace clock tree hierarchy and attributes
 */

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *clk_debugfs_root;

static int clk_dbg_show_summary(struct seq_file *s, void *unused)
{
        struct clk *c;
        struct clk *pa;

        mutex_lock(&clocks_mutex);
        seq_printf(s, "%-30s %-30s %-10s %s\n",
                   "clock-name", "parent-name", "rate", "use-count");

        list_for_each_entry(c, &clocks, node) {
                pa = c->parent;
                seq_printf(s, "%-30s %-30s %-10lu %d\n",
                           c->name, pa ? pa->name : "none", c->rate,
                           c->usecount);
        }
        mutex_unlock(&clocks_mutex);

        return 0;
}

static int clk_dbg_open(struct inode *inode, struct file *file)
{
        return single_open(file, clk_dbg_show_summary, inode->i_private);
}

static const struct file_operations debug_clock_fops = {
        .open           = clk_dbg_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int clk_debugfs_register_one(struct clk *c)
{
        int err;
        struct dentry *d;
        struct clk *pa = c->parent;

        d = debugfs_create_dir(c->name, pa ? pa->dent : clk_debugfs_root);
        if (!d)
                return -ENOMEM;
        c->dent = d;

        d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount);
        if (!d) {
                err = -ENOMEM;
                goto err_out;
        }
        d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
        if (!d) {
                err = -ENOMEM;
                goto err_out;
        }
        d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
        if (!d) {
                err = -ENOMEM;
                goto err_out;
        }
        return 0;

err_out:
        debugfs_remove_recursive(c->dent);
        return err;
}

static int clk_debugfs_register(struct clk *c)
{
        int err;
        struct clk *pa = c->parent;

        if (pa && !pa->dent) {
                err = clk_debugfs_register(pa);
                if (err)
                        return err;
        }

        if (!c->dent) {
                err = clk_debugfs_register_one(c);
                if (err)
                        return err;
        }
        return 0;
}

static int __init clk_debugfs_init(void)
{
        struct clk *c;
        struct dentry *d;
        int err;

        d = debugfs_create_dir("clock", NULL);
        if (!d)
                return -ENOMEM;
        clk_debugfs_root = d;

        list_for_each_entry(c, &clocks, node) {
                err = clk_debugfs_register(c);
                if (err)
                        goto err_out;
        }

        d = debugfs_create_file("summary", S_IRUGO,
                                d, NULL, &debug_clock_fops);
        if (!d)
                return -ENOMEM;

        return 0;
err_out:
        debugfs_remove_recursive(clk_debugfs_root);
        return err;
}
late_initcall(clk_debugfs_init);

#endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */