2 * linux/arch/arm/mach-omap1/clock.c
4 * Copyright (C) 2004 - 2005, 2009-2010 Nokia Corporation
5 * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
7 * Modified to use omap shared clock framework by
8 * Tony Lindgren <tony@atomide.com>
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
14 #include <linux/kernel.h>
15 #include <linux/export.h>
16 #include <linux/list.h>
17 #include <linux/errno.h>
18 #include <linux/err.h>
20 #include <linux/clk.h>
21 #include <linux/clkdev.h>
23 #include <asm/mach-types.h>
28 #include <mach/hardware.h>
30 #include "../plat-omap/sram.h"
36 __u32 arm_idlect1_mask
;
37 struct clk
*api_ck_p
, *ck_dpll1_p
, *ck_ref_p
;
39 static LIST_HEAD(clocks
);
40 static DEFINE_MUTEX(clocks_mutex
);
41 static DEFINE_SPINLOCK(clockfw_lock
);
44 * Omap1 specific clock functions
47 unsigned long omap1_uart_recalc(struct clk
*clk
)
49 unsigned int val
= __raw_readl(clk
->enable_reg
);
50 return val
& clk
->enable_bit
? 48000000 : 12000000;
53 unsigned long omap1_sossi_recalc(struct clk
*clk
)
55 u32 div
= omap_readl(MOD_CONF_CTRL_1
);
57 div
= (div
>> 17) & 0x7;
60 return clk
->parent
->rate
/ div
;
63 static void omap1_clk_allow_idle(struct clk
*clk
)
65 struct arm_idlect1_clk
* iclk
= (struct arm_idlect1_clk
*)clk
;
67 if (!(clk
->flags
& CLOCK_IDLE_CONTROL
))
70 if (iclk
->no_idle_count
> 0 && !(--iclk
->no_idle_count
))
71 arm_idlect1_mask
|= 1 << iclk
->idlect_shift
;
74 static void omap1_clk_deny_idle(struct clk
*clk
)
76 struct arm_idlect1_clk
* iclk
= (struct arm_idlect1_clk
*)clk
;
78 if (!(clk
->flags
& CLOCK_IDLE_CONTROL
))
81 if (iclk
->no_idle_count
++ == 0)
82 arm_idlect1_mask
&= ~(1 << iclk
->idlect_shift
);
85 static __u16
verify_ckctl_value(__u16 newval
)
87 /* This function checks for following limitations set
88 * by the hardware (all conditions must be true):
89 * DSPMMU_CK == DSP_CK or DSPMMU_CK == DSP_CK/2
94 * In addition following rules are enforced:
98 * However, maximum frequencies are not checked for!
107 per_exp
= (newval
>> CKCTL_PERDIV_OFFSET
) & 3;
108 lcd_exp
= (newval
>> CKCTL_LCDDIV_OFFSET
) & 3;
109 arm_exp
= (newval
>> CKCTL_ARMDIV_OFFSET
) & 3;
110 dsp_exp
= (newval
>> CKCTL_DSPDIV_OFFSET
) & 3;
111 tc_exp
= (newval
>> CKCTL_TCDIV_OFFSET
) & 3;
112 dspmmu_exp
= (newval
>> CKCTL_DSPMMUDIV_OFFSET
) & 3;
114 if (dspmmu_exp
< dsp_exp
)
115 dspmmu_exp
= dsp_exp
;
116 if (dspmmu_exp
> dsp_exp
+1)
117 dspmmu_exp
= dsp_exp
+1;
118 if (tc_exp
< arm_exp
)
120 if (tc_exp
< dspmmu_exp
)
122 if (tc_exp
> lcd_exp
)
124 if (tc_exp
> per_exp
)
128 newval
|= per_exp
<< CKCTL_PERDIV_OFFSET
;
129 newval
|= lcd_exp
<< CKCTL_LCDDIV_OFFSET
;
130 newval
|= arm_exp
<< CKCTL_ARMDIV_OFFSET
;
131 newval
|= dsp_exp
<< CKCTL_DSPDIV_OFFSET
;
132 newval
|= tc_exp
<< CKCTL_TCDIV_OFFSET
;
133 newval
|= dspmmu_exp
<< CKCTL_DSPMMUDIV_OFFSET
;
138 static int calc_dsor_exp(struct clk
*clk
, unsigned long rate
)
140 /* Note: If target frequency is too low, this function will return 4,
141 * which is invalid value. Caller must check for this value and act
144 * Note: This function does not check for following limitations set
145 * by the hardware (all conditions must be true):
146 * DSPMMU_CK == DSP_CK or DSPMMU_CK == DSP_CK/2
151 unsigned long realrate
;
155 parent
= clk
->parent
;
156 if (unlikely(parent
== NULL
))
159 realrate
= parent
->rate
;
160 for (dsor_exp
=0; dsor_exp
<4; dsor_exp
++) {
161 if (realrate
<= rate
)
170 unsigned long omap1_ckctl_recalc(struct clk
*clk
)
172 /* Calculate divisor encoded as 2-bit exponent */
173 int dsor
= 1 << (3 & (omap_readw(ARM_CKCTL
) >> clk
->rate_offset
));
175 return clk
->parent
->rate
/ dsor
;
178 unsigned long omap1_ckctl_recalc_dsp_domain(struct clk
*clk
)
182 /* Calculate divisor encoded as 2-bit exponent
184 * The clock control bits are in DSP domain,
185 * so api_ck is needed for access.
186 * Note that DSP_CKCTL virt addr = phys addr, so
187 * we must use __raw_readw() instead of omap_readw().
189 omap1_clk_enable(api_ck_p
);
190 dsor
= 1 << (3 & (__raw_readw(DSP_CKCTL
) >> clk
->rate_offset
));
191 omap1_clk_disable(api_ck_p
);
193 return clk
->parent
->rate
/ dsor
;
196 /* MPU virtual clock functions */
197 int omap1_select_table_rate(struct clk
*clk
, unsigned long rate
)
199 /* Find the highest supported frequency <= rate and switch to it */
200 struct mpu_rate
* ptr
;
201 unsigned long ref_rate
;
203 ref_rate
= ck_ref_p
->rate
;
205 for (ptr
= omap1_rate_table
; ptr
->rate
; ptr
++) {
206 if (!(ptr
->flags
& cpu_mask
))
209 if (ptr
->xtal
!= ref_rate
)
212 /* Can check only after xtal frequency check */
213 if (ptr
->rate
<= rate
)
221 * In most cases we should not need to reprogram DPLL.
222 * Reprogramming the DPLL is tricky, it must be done from SRAM.
224 omap_sram_reprogram_clock(ptr
->dpllctl_val
, ptr
->ckctl_val
);
226 /* XXX Do we need to recalculate the tree below DPLL1 at this point? */
227 ck_dpll1_p
->rate
= ptr
->pll_rate
;
232 int omap1_clk_set_rate_dsp_domain(struct clk
*clk
, unsigned long rate
)
237 dsor_exp
= calc_dsor_exp(clk
, rate
);
243 regval
= __raw_readw(DSP_CKCTL
);
244 regval
&= ~(3 << clk
->rate_offset
);
245 regval
|= dsor_exp
<< clk
->rate_offset
;
246 __raw_writew(regval
, DSP_CKCTL
);
247 clk
->rate
= clk
->parent
->rate
/ (1 << dsor_exp
);
252 long omap1_clk_round_rate_ckctl_arm(struct clk
*clk
, unsigned long rate
)
254 int dsor_exp
= calc_dsor_exp(clk
, rate
);
259 return clk
->parent
->rate
/ (1 << dsor_exp
);
262 int omap1_clk_set_rate_ckctl_arm(struct clk
*clk
, unsigned long rate
)
267 dsor_exp
= calc_dsor_exp(clk
, rate
);
273 regval
= omap_readw(ARM_CKCTL
);
274 regval
&= ~(3 << clk
->rate_offset
);
275 regval
|= dsor_exp
<< clk
->rate_offset
;
276 regval
= verify_ckctl_value(regval
);
277 omap_writew(regval
, ARM_CKCTL
);
278 clk
->rate
= clk
->parent
->rate
/ (1 << dsor_exp
);
282 long omap1_round_to_table_rate(struct clk
*clk
, unsigned long rate
)
284 /* Find the highest supported frequency <= rate */
285 struct mpu_rate
* ptr
;
287 unsigned long ref_rate
;
289 ref_rate
= ck_ref_p
->rate
;
291 highest_rate
= -EINVAL
;
293 for (ptr
= omap1_rate_table
; ptr
->rate
; ptr
++) {
294 if (!(ptr
->flags
& cpu_mask
))
297 if (ptr
->xtal
!= ref_rate
)
300 highest_rate
= ptr
->rate
;
302 /* Can check only after xtal frequency check */
303 if (ptr
->rate
<= rate
)
/* Pick the smallest usable external-clock divisor giving a rate no
 * higher than @rate.
 *
 * MCLK and BCLK divisor selection is not linear:
 *	freq = 96MHz / dsor
 *
 * RATIO_SEL range: dsor <-> RATIO_SEL
 *	0..6:  (RATIO_SEL+2) <-> (dsor-2)
 *	6..48: (8+(RATIO_SEL-6)*2) <-> ((dsor-8)/2+6)
 * Minimum dsor is 2 and maximum is 96. Odd divisors starting from 9
 * can not be used.
 */
static unsigned calc_ext_dsor(unsigned long rate)
{
	unsigned dsor;

	for (dsor = 2; dsor < 96; ++dsor) {
		if ((dsor & 1) && dsor > 8)
			continue;
		if (rate >= 96000000 / dsor)
			break;
	}
	return dsor;
}
332 /* XXX Only needed on 1510 */
333 int omap1_set_uart_rate(struct clk
*clk
, unsigned long rate
)
337 val
= __raw_readl(clk
->enable_reg
);
338 if (rate
== 12000000)
339 val
&= ~(1 << clk
->enable_bit
);
340 else if (rate
== 48000000)
341 val
|= (1 << clk
->enable_bit
);
344 __raw_writel(val
, clk
->enable_reg
);
350 /* External clock (MCLK & BCLK) functions */
351 int omap1_set_ext_clk_rate(struct clk
*clk
, unsigned long rate
)
356 dsor
= calc_ext_dsor(rate
);
357 clk
->rate
= 96000000 / dsor
;
359 ratio_bits
= ((dsor
- 8) / 2 + 6) << 2;
361 ratio_bits
= (dsor
- 2) << 2;
363 ratio_bits
|= __raw_readw(clk
->enable_reg
) & ~0xfd;
364 __raw_writew(ratio_bits
, clk
->enable_reg
);
369 int omap1_set_sossi_rate(struct clk
*clk
, unsigned long rate
)
373 unsigned long p_rate
;
375 p_rate
= clk
->parent
->rate
;
376 /* Round towards slower frequency */
377 div
= (p_rate
+ rate
- 1) / rate
;
379 if (div
< 0 || div
> 7)
382 l
= omap_readl(MOD_CONF_CTRL_1
);
385 omap_writel(l
, MOD_CONF_CTRL_1
);
387 clk
->rate
= p_rate
/ (div
+ 1);
/* Round @rate to the nearest achievable external clock rate. */
long omap1_round_ext_clk_rate(struct clk *clk, unsigned long rate)
{
	return 96000000 / calc_ext_dsor(rate);
}
397 void omap1_init_ext_clk(struct clk
*clk
)
402 /* Determine current rate and ensure clock is based on 96MHz APLL */
403 ratio_bits
= __raw_readw(clk
->enable_reg
) & ~1;
404 __raw_writew(ratio_bits
, clk
->enable_reg
);
406 ratio_bits
= (ratio_bits
& 0xfc) >> 2;
408 dsor
= (ratio_bits
- 6) * 2 + 8;
410 dsor
= ratio_bits
+ 2;
412 clk
-> rate
= 96000000 / dsor
;
415 int omap1_clk_enable(struct clk
*clk
)
419 if (clk
->usecount
++ == 0) {
421 ret
= omap1_clk_enable(clk
->parent
);
425 if (clk
->flags
& CLOCK_NO_IDLE_PARENT
)
426 omap1_clk_deny_idle(clk
->parent
);
429 ret
= clk
->ops
->enable(clk
);
432 omap1_clk_disable(clk
->parent
);
443 void omap1_clk_disable(struct clk
*clk
)
445 if (clk
->usecount
> 0 && !(--clk
->usecount
)) {
446 clk
->ops
->disable(clk
);
447 if (likely(clk
->parent
)) {
448 omap1_clk_disable(clk
->parent
);
449 if (clk
->flags
& CLOCK_NO_IDLE_PARENT
)
450 omap1_clk_allow_idle(clk
->parent
);
455 static int omap1_clk_enable_generic(struct clk
*clk
)
460 if (unlikely(clk
->enable_reg
== NULL
)) {
461 printk(KERN_ERR
"clock.c: Enable for %s without enable code\n",
466 if (clk
->flags
& ENABLE_REG_32BIT
) {
467 regval32
= __raw_readl(clk
->enable_reg
);
468 regval32
|= (1 << clk
->enable_bit
);
469 __raw_writel(regval32
, clk
->enable_reg
);
471 regval16
= __raw_readw(clk
->enable_reg
);
472 regval16
|= (1 << clk
->enable_bit
);
473 __raw_writew(regval16
, clk
->enable_reg
);
479 static void omap1_clk_disable_generic(struct clk
*clk
)
484 if (clk
->enable_reg
== NULL
)
487 if (clk
->flags
& ENABLE_REG_32BIT
) {
488 regval32
= __raw_readl(clk
->enable_reg
);
489 regval32
&= ~(1 << clk
->enable_bit
);
490 __raw_writel(regval32
, clk
->enable_reg
);
492 regval16
= __raw_readw(clk
->enable_reg
);
493 regval16
&= ~(1 << clk
->enable_bit
);
494 __raw_writew(regval16
, clk
->enable_reg
);
498 const struct clkops clkops_generic
= {
499 .enable
= omap1_clk_enable_generic
,
500 .disable
= omap1_clk_disable_generic
,
503 static int omap1_clk_enable_dsp_domain(struct clk
*clk
)
507 retval
= omap1_clk_enable(api_ck_p
);
509 retval
= omap1_clk_enable_generic(clk
);
510 omap1_clk_disable(api_ck_p
);
516 static void omap1_clk_disable_dsp_domain(struct clk
*clk
)
518 if (omap1_clk_enable(api_ck_p
) == 0) {
519 omap1_clk_disable_generic(clk
);
520 omap1_clk_disable(api_ck_p
);
524 const struct clkops clkops_dspck
= {
525 .enable
= omap1_clk_enable_dsp_domain
,
526 .disable
= omap1_clk_disable_dsp_domain
,
529 /* XXX SYSC register handling does not belong in the clock framework */
530 static int omap1_clk_enable_uart_functional_16xx(struct clk
*clk
)
533 struct uart_clk
*uclk
;
535 ret
= omap1_clk_enable_generic(clk
);
537 /* Set smart idle acknowledgement mode */
538 uclk
= (struct uart_clk
*)clk
;
539 omap_writeb((omap_readb(uclk
->sysc_addr
) & ~0x10) | 8,
546 /* XXX SYSC register handling does not belong in the clock framework */
547 static void omap1_clk_disable_uart_functional_16xx(struct clk
*clk
)
549 struct uart_clk
*uclk
;
551 /* Set force idle acknowledgement mode */
552 uclk
= (struct uart_clk
*)clk
;
553 omap_writeb((omap_readb(uclk
->sysc_addr
) & ~0x18), uclk
->sysc_addr
);
555 omap1_clk_disable_generic(clk
);
558 /* XXX SYSC register handling does not belong in the clock framework */
559 const struct clkops clkops_uart_16xx
= {
560 .enable
= omap1_clk_enable_uart_functional_16xx
,
561 .disable
= omap1_clk_disable_uart_functional_16xx
,
564 long omap1_clk_round_rate(struct clk
*clk
, unsigned long rate
)
566 if (clk
->round_rate
!= NULL
)
567 return clk
->round_rate(clk
, rate
);
572 int omap1_clk_set_rate(struct clk
*clk
, unsigned long rate
)
577 ret
= clk
->set_rate(clk
, rate
);
582 * Omap1 clock reset and init functions
585 #ifdef CONFIG_OMAP_RESET_CLOCKS
587 void omap1_clk_disable_unused(struct clk
*clk
)
591 /* Clocks in the DSP domain need api_ck. Just assume bootloader
592 * has not enabled any DSP clocks */
593 if (clk
->enable_reg
== DSP_IDLECT2
) {
594 pr_info("Skipping reset check for DSP domain clock \"%s\"\n",
599 /* Is the clock already disabled? */
600 if (clk
->flags
& ENABLE_REG_32BIT
)
601 regval32
= __raw_readl(clk
->enable_reg
);
603 regval32
= __raw_readw(clk
->enable_reg
);
605 if ((regval32
& (1 << clk
->enable_bit
)) == 0)
608 printk(KERN_INFO
"Disabling unused clock \"%s\"... ", clk
->name
);
609 clk
->ops
->disable(clk
);
616 int clk_enable(struct clk
*clk
)
621 if (clk
== NULL
|| IS_ERR(clk
))
624 spin_lock_irqsave(&clockfw_lock
, flags
);
625 ret
= omap1_clk_enable(clk
);
626 spin_unlock_irqrestore(&clockfw_lock
, flags
);
630 EXPORT_SYMBOL(clk_enable
);
632 void clk_disable(struct clk
*clk
)
636 if (clk
== NULL
|| IS_ERR(clk
))
639 spin_lock_irqsave(&clockfw_lock
, flags
);
640 if (clk
->usecount
== 0) {
641 pr_err("Trying disable clock %s with 0 usecount\n",
647 omap1_clk_disable(clk
);
650 spin_unlock_irqrestore(&clockfw_lock
, flags
);
652 EXPORT_SYMBOL(clk_disable
);
654 unsigned long clk_get_rate(struct clk
*clk
)
659 if (clk
== NULL
|| IS_ERR(clk
))
662 spin_lock_irqsave(&clockfw_lock
, flags
);
664 spin_unlock_irqrestore(&clockfw_lock
, flags
);
668 EXPORT_SYMBOL(clk_get_rate
);
671 * Optional clock functions defined in include/linux/clk.h
674 long clk_round_rate(struct clk
*clk
, unsigned long rate
)
679 if (clk
== NULL
|| IS_ERR(clk
))
682 spin_lock_irqsave(&clockfw_lock
, flags
);
683 ret
= omap1_clk_round_rate(clk
, rate
);
684 spin_unlock_irqrestore(&clockfw_lock
, flags
);
688 EXPORT_SYMBOL(clk_round_rate
);
690 int clk_set_rate(struct clk
*clk
, unsigned long rate
)
695 if (clk
== NULL
|| IS_ERR(clk
))
698 spin_lock_irqsave(&clockfw_lock
, flags
);
699 ret
= omap1_clk_set_rate(clk
, rate
);
702 spin_unlock_irqrestore(&clockfw_lock
, flags
);
706 EXPORT_SYMBOL(clk_set_rate
);
708 int clk_set_parent(struct clk
*clk
, struct clk
*parent
)
710 WARN_ONCE(1, "clk_set_parent() not implemented for OMAP1\n");
714 EXPORT_SYMBOL(clk_set_parent
);
716 struct clk
*clk_get_parent(struct clk
*clk
)
720 EXPORT_SYMBOL(clk_get_parent
);
723 * OMAP specific clock functions shared between omap1 and omap2
726 int __initdata mpurate
;
729 * By default we use the rate set by the bootloader.
730 * You can override this with mpurate= cmdline option.
732 static int __init
omap_clk_setup(char *str
)
734 get_option(&str
, &mpurate
);
744 __setup("mpurate=", omap_clk_setup
);
746 /* Used for clocks that always have same value as the parent clock */
747 unsigned long followparent_recalc(struct clk
*clk
)
749 return clk
->parent
->rate
;
753 * Used for clocks that have the same value as the parent clock,
754 * divided by some factor
756 unsigned long omap_fixed_divisor_recalc(struct clk
*clk
)
758 WARN_ON(!clk
->fixed_div
);
760 return clk
->parent
->rate
/ clk
->fixed_div
;
763 void clk_reparent(struct clk
*child
, struct clk
*parent
)
765 list_del_init(&child
->sibling
);
767 list_add(&child
->sibling
, &parent
->children
);
768 child
->parent
= parent
;
770 /* now do the debugfs renaming to reattach the child
771 to the proper parent */
774 /* Propagate rate to children */
775 void propagate_rate(struct clk
*tclk
)
779 list_for_each_entry(clkp
, &tclk
->children
, sibling
) {
781 clkp
->rate
= clkp
->recalc(clkp
);
782 propagate_rate(clkp
);
786 static LIST_HEAD(root_clks
);
789 * recalculate_root_clocks - recalculate and propagate all root clocks
791 * Recalculates all root clocks (clocks with no parent), which if the
792 * clock's .recalc is set correctly, should also propagate their rates.
795 void recalculate_root_clocks(void)
799 list_for_each_entry(clkp
, &root_clks
, sibling
) {
801 clkp
->rate
= clkp
->recalc(clkp
);
802 propagate_rate(clkp
);
807 * clk_preinit - initialize any fields in the struct clk before clk init
808 * @clk: struct clk * to initialize
810 * Initialize any struct clk fields needed before normal clk initialization
811 * can run. No return value.
813 void clk_preinit(struct clk
*clk
)
815 INIT_LIST_HEAD(&clk
->children
);
818 int clk_register(struct clk
*clk
)
820 if (clk
== NULL
|| IS_ERR(clk
))
824 * trap out already registered clocks
826 if (clk
->node
.next
|| clk
->node
.prev
)
829 mutex_lock(&clocks_mutex
);
831 list_add(&clk
->sibling
, &clk
->parent
->children
);
833 list_add(&clk
->sibling
, &root_clks
);
835 list_add(&clk
->node
, &clocks
);
838 mutex_unlock(&clocks_mutex
);
842 EXPORT_SYMBOL(clk_register
);
844 void clk_unregister(struct clk
*clk
)
846 if (clk
== NULL
|| IS_ERR(clk
))
849 mutex_lock(&clocks_mutex
);
850 list_del(&clk
->sibling
);
851 list_del(&clk
->node
);
852 mutex_unlock(&clocks_mutex
);
854 EXPORT_SYMBOL(clk_unregister
);
856 void clk_enable_init_clocks(void)
860 list_for_each_entry(clkp
, &clocks
, node
)
861 if (clkp
->flags
& ENABLE_ON_INIT
)
866 * omap_clk_get_by_name - locate OMAP struct clk by its name
867 * @name: name of the struct clk to locate
869 * Locate an OMAP struct clk by its name. Assumes that struct clk
870 * names are unique. Returns NULL if not found or a pointer to the
871 * struct clk if found.
873 struct clk
*omap_clk_get_by_name(const char *name
)
876 struct clk
*ret
= NULL
;
878 mutex_lock(&clocks_mutex
);
880 list_for_each_entry(c
, &clocks
, node
) {
881 if (!strcmp(c
->name
, name
)) {
887 mutex_unlock(&clocks_mutex
);
892 int omap_clk_enable_autoidle_all(void)
897 spin_lock_irqsave(&clockfw_lock
, flags
);
899 list_for_each_entry(c
, &clocks
, node
)
900 if (c
->ops
->allow_idle
)
901 c
->ops
->allow_idle(c
);
903 spin_unlock_irqrestore(&clockfw_lock
, flags
);
908 int omap_clk_disable_autoidle_all(void)
913 spin_lock_irqsave(&clockfw_lock
, flags
);
915 list_for_each_entry(c
, &clocks
, node
)
916 if (c
->ops
->deny_idle
)
917 c
->ops
->deny_idle(c
);
919 spin_unlock_irqrestore(&clockfw_lock
, flags
);
/* No-op enable for always-on / software-only clocks. */
static int clkll_enable_null(struct clk *clk)
{
	return 0;
}
/* No-op disable for always-on / software-only clocks. */
static void clkll_disable_null(struct clk *clk)
{
}
936 const struct clkops clkops_null
= {
937 .enable
= clkll_enable_null
,
938 .disable
= clkll_disable_null
,
944 * Used for clock aliases that are needed on some OMAPs, but not others
946 struct clk dummy_ck
= {
955 #ifdef CONFIG_OMAP_RESET_CLOCKS
957 * Disable any unused clocks left on by the bootloader
959 static int __init
clk_disable_unused(void)
964 pr_info("clock: disabling unused clocks to save power\n");
966 spin_lock_irqsave(&clockfw_lock
, flags
);
967 list_for_each_entry(ck
, &clocks
, node
) {
968 if (ck
->ops
== &clkops_null
)
971 if (ck
->usecount
> 0 || !ck
->enable_reg
)
974 omap1_clk_disable_unused(ck
);
976 spin_unlock_irqrestore(&clockfw_lock
, flags
);
980 late_initcall(clk_disable_unused
);
981 late_initcall(omap_clk_enable_autoidle_all
);
984 #if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
986 * debugfs support to trace clock tree hierarchy and attributes
989 #include <linux/debugfs.h>
990 #include <linux/seq_file.h>
992 static struct dentry
*clk_debugfs_root
;
994 static int clk_dbg_show_summary(struct seq_file
*s
, void *unused
)
999 mutex_lock(&clocks_mutex
);
1000 seq_printf(s
, "%-30s %-30s %-10s %s\n",
1001 "clock-name", "parent-name", "rate", "use-count");
1003 list_for_each_entry(c
, &clocks
, node
) {
1005 seq_printf(s
, "%-30s %-30s %-10lu %d\n",
1006 c
->name
, pa
? pa
->name
: "none", c
->rate
,
1009 mutex_unlock(&clocks_mutex
);
1014 static int clk_dbg_open(struct inode
*inode
, struct file
*file
)
1016 return single_open(file
, clk_dbg_show_summary
, inode
->i_private
);
1019 static const struct file_operations debug_clock_fops
= {
1020 .open
= clk_dbg_open
,
1022 .llseek
= seq_lseek
,
1023 .release
= single_release
,
1026 static int clk_debugfs_register_one(struct clk
*c
)
1030 struct clk
*pa
= c
->parent
;
1032 d
= debugfs_create_dir(c
->name
, pa
? pa
->dent
: clk_debugfs_root
);
1037 d
= debugfs_create_u8("usecount", S_IRUGO
, c
->dent
, (u8
*)&c
->usecount
);
1042 d
= debugfs_create_u32("rate", S_IRUGO
, c
->dent
, (u32
*)&c
->rate
);
1047 d
= debugfs_create_x32("flags", S_IRUGO
, c
->dent
, (u32
*)&c
->flags
);
1055 debugfs_remove_recursive(c
->dent
);
1059 static int clk_debugfs_register(struct clk
*c
)
1062 struct clk
*pa
= c
->parent
;
1064 if (pa
&& !pa
->dent
) {
1065 err
= clk_debugfs_register(pa
);
1071 err
= clk_debugfs_register_one(c
);
1078 static int __init
clk_debugfs_init(void)
1084 d
= debugfs_create_dir("clock", NULL
);
1087 clk_debugfs_root
= d
;
1089 list_for_each_entry(c
, &clocks
, node
) {
1090 err
= clk_debugfs_register(c
);
1095 d
= debugfs_create_file("summary", S_IRUGO
,
1096 d
, NULL
, &debug_clock_fops
);
1102 debugfs_remove_recursive(clk_debugfs_root
);
1105 late_initcall(clk_debugfs_init
);
1107 #endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */