ARM: OMAP: remove plat/clock.h
/*
 * linux/arch/arm/mach-omap2/clock.c
 *
 * Copyright (C) 2005-2008 Texas Instruments, Inc.
 * Copyright (C) 2004-2010 Nokia Corporation
 *
 * Contacts:
 * Richard Woodruff <r-woodruff2@ti.com>
 * Paul Walmsley
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/bitops.h>

#include <asm/cpu.h>

#include <plat/prcm.h>

#include <trace/events/power.h>

#include "soc.h"
#include "clockdomain.h"
#include "clock.h"
#include "cm2xxx_3xxx.h"
#include "cm-regbits-24xx.h"
#include "cm-regbits-34xx.h"

u16 cpu_mask;

/*
 * clkdm_control: if true, then when a clock is enabled in the
 * hardware, its clockdomain will first be enabled; and when a clock
 * is disabled in the hardware, its clockdomain will be disabled
 * afterwards.
 */
static bool clkdm_control = true;

static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
static DEFINE_SPINLOCK(clockfw_lock);

/*
 * OMAP2+ specific clock functions
 */

/* Private functions */

/**
 * _omap2_module_wait_ready - wait for an OMAP module to leave IDLE
 * @clk: struct clk * belonging to the module
 *
 * If the necessary clocks for the OMAP hardware IP block that
 * corresponds to clock @clk are enabled, then wait for the module to
 * indicate readiness (i.e., to leave IDLE). This code does not
 * belong in the clock code and will be moved in the medium term to
 * module-dependent code. No return value.
 */
static void _omap2_module_wait_ready(struct clk *clk)
{
        void __iomem *companion_reg, *idlest_reg;
        u8 other_bit, idlest_bit, idlest_val;

        /* Not all modules have multiple clocks that their IDLEST depends on */
        if (clk->ops->find_companion) {
                clk->ops->find_companion(clk, &companion_reg, &other_bit);
                if (!(__raw_readl(companion_reg) & (1 << other_bit)))
                        return;
        }

        clk->ops->find_idlest(clk, &idlest_reg, &idlest_bit, &idlest_val);

        omap2_cm_wait_idlest(idlest_reg, (1 << idlest_bit), idlest_val,
                             __clk_get_name(clk));
}

/* Public functions */

/**
 * omap2_init_clk_clkdm - look up a clockdomain name, store pointer in clk
 * @clk: OMAP clock struct ptr to use
 *
 * Convert a clockdomain name stored in a struct clk 'clk' into a
 * clockdomain pointer, and save it into the struct clk. Intended to be
 * called during clk_register(). No return value.
 */
void omap2_init_clk_clkdm(struct clk *clk)
{
        struct clockdomain *clkdm;
        const char *clk_name;

        if (!clk->clkdm_name)
                return;

        clk_name = __clk_get_name(clk);

        clkdm = clkdm_lookup(clk->clkdm_name);
        if (clkdm) {
                pr_debug("clock: associated clk %s to clkdm %s\n",
                         clk_name, clk->clkdm_name);
                clk->clkdm = clkdm;
        } else {
                pr_debug("clock: could not associate clk %s to clkdm %s\n",
                         clk_name, clk->clkdm_name);
        }
}

/**
 * omap2_clk_disable_clkdm_control - disable clkdm control on clk enable/disable
 *
 * Prevent the OMAP clock code from calling into the clockdomain code
 * when a hardware clock in that clockdomain is enabled or disabled.
 * Intended to be called at init time from omap*_clk_init(). No
 * return value.
 */
void __init omap2_clk_disable_clkdm_control(void)
{
        clkdm_control = false;
}

/**
 * omap2_clk_dflt_find_companion - find companion clock to @clk
 * @clk: struct clk * to find the companion clock of
 * @other_reg: void __iomem ** to return the companion clock CM_*CLKEN va in
 * @other_bit: u8 * to return the companion clock bit shift in
 *
 * Note: We don't need special code here for INVERT_ENABLE for the
 * time being since INVERT_ENABLE only applies to clocks enabled by
 * CM_CLKEN_PLL.
 *
 * Convert CM_ICLKEN* <-> CM_FCLKEN*. This conversion assumes it's
 * just a matter of XORing the bits.
 *
 * Some clocks don't have companion clocks. For example, modules with
 * only an interface clock (such as MAILBOXES) don't have a companion
 * clock. Right now, this code relies on the hardware exporting a bit
 * in the correct companion register that indicates that the
 * nonexistent 'companion clock' is active. Future patches will
 * associate this type of code with per-module data structures to
 * avoid this issue, and remove the casts. No return value.
 */
void omap2_clk_dflt_find_companion(struct clk *clk, void __iomem **other_reg,
                                   u8 *other_bit)
{
        u32 r;

        /*
         * Convert CM_ICLKEN* <-> CM_FCLKEN*. This conversion assumes
         * it's just a matter of XORing the bits.
         */
        r = ((__force u32)clk->enable_reg ^ (CM_FCLKEN ^ CM_ICLKEN));

        *other_reg = (__force void __iomem *)r;
        *other_bit = clk->enable_bit;
}
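
/*
 * Worked example (illustrative only; the concrete offsets come from the
 * OMAP2/3 PRCM register layout): assuming the usual layout where the
 * functional-clock enable register CM_FCLKEN sits at clockdomain offset
 * 0x00 and the interface-clock enable register CM_ICLKEN at 0x10,
 * (CM_FCLKEN ^ CM_ICLKEN) == 0x10, so XORing an enable register address
 * with that constant flips CM_FCLKEN<n> to CM_ICLKEN<n> and back while
 * the bit shift stays the same. E.g. a clock enabled via bit 3 of
 * CM_FCLKEN1 would have its companion looked up at bit 3 of CM_ICLKEN1.
 */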

/**
 * omap2_clk_dflt_find_idlest - find CM_IDLEST reg va, bit shift for @clk
 * @clk: struct clk * to find IDLEST info for
 * @idlest_reg: void __iomem ** to return the CM_IDLEST va in
 * @idlest_bit: u8 * to return the CM_IDLEST bit shift in
 * @idlest_val: u8 * to return the idle status indicator
 *
 * Return the CM_IDLEST register address and bit shift corresponding
 * to the module that "owns" this clock. This default code assumes
 * that the CM_IDLEST bit shift is the CM_*CLKEN bit shift, and that
 * the IDLEST register address ID corresponds to the CM_*CLKEN
 * register address ID (e.g., that CM_FCLKEN2 corresponds to
 * CM_IDLEST2). This is not true for all modules. No return value.
 */
void omap2_clk_dflt_find_idlest(struct clk *clk, void __iomem **idlest_reg,
                                u8 *idlest_bit, u8 *idlest_val)
{
        u32 r;

        r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
        *idlest_reg = (__force void __iomem *)r;
        *idlest_bit = clk->enable_bit;

        /*
         * 24xx uses 0 to indicate not ready, and 1 to indicate ready.
         * 34xx reverses this, just to keep us on our toes.
         * AM35xx uses both, depending on the module.
         */
        if (cpu_is_omap24xx())
                *idlest_val = OMAP24XX_CM_IDLEST_VAL;
        else if (cpu_is_omap34xx())
                *idlest_val = OMAP34XX_CM_IDLEST_VAL;
        else
                BUG();
}
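
/*
 * Worked example (illustrative; exact offsets depend on the OMAP2/3
 * PRCM layout, where per-clockdomain CM_FCLKENx registers sit at
 * offsets 0x00/0x04, CM_ICLKENx at 0x10/0x14 and CM_IDLESTx at
 * 0x20/0x24): masking the enable register address with ~0xf0 and ORing
 * in 0x20 maps CM_FCLKEN1 (0x00) or CM_ICLKEN1 (0x10) to CM_IDLEST1
 * (0x20), and CM_FCLKEN2 (0x04) or CM_ICLKEN2 (0x14) to CM_IDLEST2
 * (0x24) - exactly the "register address ID" correspondence described
 * in the kerneldoc above.
 */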

int omap2_dflt_clk_enable(struct clk *clk)
{
        u32 v;

        if (unlikely(clk->enable_reg == NULL)) {
                pr_err("clock.c: Enable for %s without enable code\n",
                       clk->name);
                return 0; /* REVISIT: -EINVAL */
        }

        v = __raw_readl(clk->enable_reg);
        if (clk->flags & INVERT_ENABLE)
                v &= ~(1 << clk->enable_bit);
        else
                v |= (1 << clk->enable_bit);
        __raw_writel(v, clk->enable_reg);
        v = __raw_readl(clk->enable_reg); /* OCP barrier */

        if (clk->ops->find_idlest)
                _omap2_module_wait_ready(clk);

        return 0;
}

void omap2_dflt_clk_disable(struct clk *clk)
{
        u32 v;

        if (!clk->enable_reg) {
                /*
                 * 'Independent' here refers to a clock which is not
                 * controlled by its parent.
                 */
                pr_err("clock: clk_disable called on independent clock %s which has no enable_reg\n", clk->name);
                return;
        }

        v = __raw_readl(clk->enable_reg);
        if (clk->flags & INVERT_ENABLE)
                v |= (1 << clk->enable_bit);
        else
                v &= ~(1 << clk->enable_bit);
        __raw_writel(v, clk->enable_reg);
        /* No OCP barrier needed here since it is a disable operation */
}

const struct clkops clkops_omap2_dflt_wait = {
        .enable         = omap2_dflt_clk_enable,
        .disable        = omap2_dflt_clk_disable,
        .find_companion = omap2_clk_dflt_find_companion,
        .find_idlest    = omap2_clk_dflt_find_idlest,
};

const struct clkops clkops_omap2_dflt = {
        .enable         = omap2_dflt_clk_enable,
        .disable        = omap2_dflt_clk_disable,
};
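
/*
 * A minimal sketch of how these ops are typically wired up by the
 * SoC-specific clock data (the clock, register macro and clockdomain
 * names below are hypothetical, not definitions from this file):
 *
 *	static struct clk foo_fck = {
 *		.name		= "foo_fck",
 *		.ops		= &clkops_omap2_dflt_wait,
 *		.parent		= &core_ck,
 *		.enable_reg	= <CM_FCLKEN1 address for the clockdomain>,
 *		.enable_bit	= 3,
 *		.clkdm_name	= "core_l4_clkdm",
 *		.recalc		= &followparent_recalc,
 *	};
 *
 * Clocks using clkops_omap2_dflt_wait get the module-ready wait in
 * _omap2_module_wait_ready() after being enabled; clkops_omap2_dflt
 * skips that wait.
 */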

/**
 * omap2_clk_disable - disable a clock, if the system is not using it
 * @clk: struct clk * to disable
 *
 * Decrements the usecount on struct clk @clk. If there are no users
 * left, call the clkops-specific clock disable function to disable it
 * in hardware. If the clock is part of a clockdomain (which they all
 * should be), request that the clockdomain be disabled. (It too has
 * a usecount, and so will not be disabled in the hardware until it no
 * longer has any users.) If the clock has a parent clock (most of
 * them do), then call ourselves, recursing on the parent clock. This
 * can cause an entire branch of the clock tree to be powered off by
 * simply disabling one clock. Intended to be called with the clockfw_lock
 * spinlock held. No return value.
 */
void omap2_clk_disable(struct clk *clk)
{
        if (clk->usecount == 0) {
                WARN(1, "clock: %s: omap2_clk_disable() called, but usecount already 0?", clk->name);
                return;
        }

        pr_debug("clock: %s: decrementing usecount\n", clk->name);

        clk->usecount--;

        if (clk->usecount > 0)
                return;

        pr_debug("clock: %s: disabling in hardware\n", clk->name);

        if (clk->ops && clk->ops->disable) {
                trace_clock_disable(clk->name, 0, smp_processor_id());
                clk->ops->disable(clk);
        }

        if (clkdm_control && clk->clkdm)
                clkdm_clk_disable(clk->clkdm, clk);

        if (clk->parent)
                omap2_clk_disable(clk->parent);
}

/**
 * omap2_clk_enable - request that the system enable a clock
 * @clk: struct clk * to enable
 *
 * Increments the usecount on struct clk @clk. If there were no users
 * previously, then recurse up the clock tree, enabling all of the
 * clock's parents and all of the parent clockdomains, and finally,
 * enabling @clk's clockdomain, and @clk itself. Intended to be
 * called with the clockfw_lock spinlock held. Returns 0 upon success
 * or a negative error code upon failure.
 */
int omap2_clk_enable(struct clk *clk)
{
        int ret;

        pr_debug("clock: %s: incrementing usecount\n", clk->name);

        clk->usecount++;

        if (clk->usecount > 1)
                return 0;

        pr_debug("clock: %s: enabling in hardware\n", clk->name);

        if (clk->parent) {
                ret = omap2_clk_enable(clk->parent);
                if (ret) {
                        WARN(1, "clock: %s: could not enable parent %s: %d\n",
                             clk->name, clk->parent->name, ret);
                        goto oce_err1;
                }
        }

        if (clkdm_control && clk->clkdm) {
                ret = clkdm_clk_enable(clk->clkdm, clk);
                if (ret) {
                        WARN(1, "clock: %s: could not enable clockdomain %s: %d\n",
                             clk->name, clk->clkdm->name, ret);
                        goto oce_err2;
                }
        }

        if (clk->ops && clk->ops->enable) {
                trace_clock_enable(clk->name, 1, smp_processor_id());
                ret = clk->ops->enable(clk);
                if (ret) {
                        WARN(1, "clock: %s: could not enable: %d\n",
                             clk->name, ret);
                        goto oce_err3;
                }
        }

        return 0;

oce_err3:
        if (clkdm_control && clk->clkdm)
                clkdm_clk_disable(clk->clkdm, clk);
oce_err2:
        if (clk->parent)
                omap2_clk_disable(clk->parent);
oce_err1:
        clk->usecount--;

        return ret;
}

/* Given a clock and a rate apply a clock specific rounding function */
long omap2_clk_round_rate(struct clk *clk, unsigned long rate)
{
        if (clk->round_rate)
                return clk->round_rate(clk, rate);

        return clk->rate;
}

/* Set the clock rate for a clock source */
int omap2_clk_set_rate(struct clk *clk, unsigned long rate)
{
        int ret = -EINVAL;

        pr_debug("clock: set_rate for clock %s to rate %ld\n", clk->name, rate);

        /* dpll_ck, core_ck, virt_prcm_set; plus all clksel clocks */
        if (clk->set_rate) {
                trace_clock_set_rate(clk->name, rate, smp_processor_id());
                ret = clk->set_rate(clk, rate);
        }

        return ret;
}

int omap2_clk_set_parent(struct clk *clk, struct clk *new_parent)
{
        if (!clk->clksel)
                return -EINVAL;

        if (clk->parent == new_parent)
                return 0;

        return omap2_clksel_set_parent(clk, new_parent);
}

/*
 * OMAP2+ clock reset and init functions
 */

#ifdef CONFIG_OMAP_RESET_CLOCKS
void omap2_clk_disable_unused(struct clk *clk)
{
        u32 regval32, v;

        v = (clk->flags & INVERT_ENABLE) ? (1 << clk->enable_bit) : 0;

        regval32 = __raw_readl(clk->enable_reg);
        if ((regval32 & (1 << clk->enable_bit)) == v)
                return;

        pr_debug("Disabling unused clock \"%s\"\n", clk->name);
        if (cpu_is_omap34xx()) {
                omap2_clk_enable(clk);
                omap2_clk_disable(clk);
        } else {
                clk->ops->disable(clk);
        }
        if (clk->clkdm != NULL)
                pwrdm_state_switch(clk->clkdm->pwrdm.ptr);
}
#endif

/**
 * omap2_clk_switch_mpurate_at_boot - switch ARM MPU rate by boot-time argument
 * @mpurate_ck_name: clk name of the clock to change rate
 *
 * Change the ARM MPU clock rate to the rate specified on the command
 * line, if one was specified. @mpurate_ck_name should be
 * "virt_prcm_set" on OMAP2xxx and "dpll1_ck" on OMAP34xx/OMAP36xx.
 * XXX Does not handle voltage scaling - on OMAP2xxx this is currently
 * handled by the virt_prcm_set clock, but this should be handled by
 * the OPP layer. XXX This is intended to be handled by the OPP layer
 * code in the near future and should be removed from the clock code.
 * Returns -EINVAL if 'mpurate' is zero or if clk_set_rate() rejects
 * the rate, -ENOENT if the struct clk referred to by @mpurate_ck_name
 * cannot be found, or 0 upon success.
 */
int __init omap2_clk_switch_mpurate_at_boot(const char *mpurate_ck_name)
{
        struct clk *mpurate_ck;
        int r;

        if (!mpurate)
                return -EINVAL;

        mpurate_ck = clk_get(NULL, mpurate_ck_name);
        if (WARN(IS_ERR(mpurate_ck), "Failed to get %s.\n", mpurate_ck_name))
                return -ENOENT;

        r = clk_set_rate(mpurate_ck, mpurate);
        if (IS_ERR_VALUE(r)) {
                WARN(1, "clock: %s: unable to set MPU rate to %d: %d\n",
                     mpurate_ck->name, mpurate, r);
                clk_put(mpurate_ck);
                return -EINVAL;
        }

        calibrate_delay();
        recalculate_root_clocks();

        clk_put(mpurate_ck);

        return 0;
}
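
/*
 * Typical call site (a sketch, not a definition from this file): the
 * SoC-specific clock init code is expected to call this once the clock
 * tree has been registered, e.g.
 *
 *	omap2_clk_switch_mpurate_at_boot("virt_prcm_set");	// OMAP2xxx
 *	omap2_clk_switch_mpurate_at_boot("dpll1_ck");		// OMAP34xx/36xx
 *
 * using the clock names the kerneldoc above prescribes, after the
 * "mpurate=" command-line option (parsed further down in this file)
 * has populated 'mpurate'.
 */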

/**
 * omap2_clk_print_new_rates - print summary of current clock tree rates
 * @hfclkin_ck_name: clk name for the off-chip HF oscillator
 * @core_ck_name: clk name for the on-chip CORE_CLK
 * @mpu_ck_name: clk name for the ARM MPU clock
 *
 * Prints a short message to the console with the HFCLKIN oscillator
 * rate, the rate of the CORE clock, and the rate of the ARM MPU clock.
 * Called by the boot-time MPU rate switching code. XXX This is intended
 * to be handled by the OPP layer code in the near future and should be
 * removed from the clock code. No return value.
 */
void __init omap2_clk_print_new_rates(const char *hfclkin_ck_name,
                                      const char *core_ck_name,
                                      const char *mpu_ck_name)
{
        struct clk *hfclkin_ck, *core_ck, *mpu_ck;
        unsigned long hfclkin_rate;

        mpu_ck = clk_get(NULL, mpu_ck_name);
        if (WARN(IS_ERR(mpu_ck), "clock: failed to get %s.\n", mpu_ck_name))
                return;

        core_ck = clk_get(NULL, core_ck_name);
        if (WARN(IS_ERR(core_ck), "clock: failed to get %s.\n", core_ck_name))
                return;

        hfclkin_ck = clk_get(NULL, hfclkin_ck_name);
        if (WARN(IS_ERR(hfclkin_ck), "Failed to get %s.\n", hfclkin_ck_name))
                return;

        hfclkin_rate = clk_get_rate(hfclkin_ck);

        pr_info("Switched to new clocking rate (Crystal/Core/MPU): %ld.%01ld/%ld/%ld MHz\n",
                (hfclkin_rate / 1000000), ((hfclkin_rate / 100000) % 10),
                (clk_get_rate(core_ck) / 1000000),
                (clk_get_rate(mpu_ck) / 1000000));
}

/* Common data */

int clk_enable(struct clk *clk)
{
        unsigned long flags;
        int ret;

        if (clk == NULL || IS_ERR(clk))
                return -EINVAL;

        spin_lock_irqsave(&clockfw_lock, flags);
        ret = omap2_clk_enable(clk);
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return ret;
}
EXPORT_SYMBOL(clk_enable);

void clk_disable(struct clk *clk)
{
        unsigned long flags;

        if (clk == NULL || IS_ERR(clk))
                return;

        spin_lock_irqsave(&clockfw_lock, flags);
        if (clk->usecount == 0) {
                pr_err("Trying to disable clock %s with 0 usecount\n",
                       clk->name);
                WARN_ON(1);
                goto out;
        }

        omap2_clk_disable(clk);

out:
        spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_disable);

unsigned long clk_get_rate(struct clk *clk)
{
        unsigned long flags;
        unsigned long ret;

        if (clk == NULL || IS_ERR(clk))
                return 0;

        spin_lock_irqsave(&clockfw_lock, flags);
        ret = clk->rate;
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return ret;
}
EXPORT_SYMBOL(clk_get_rate);

/*
 * Optional clock functions defined in include/linux/clk.h
 */

long clk_round_rate(struct clk *clk, unsigned long rate)
{
        unsigned long flags;
        long ret;

        if (clk == NULL || IS_ERR(clk))
                return 0;

        spin_lock_irqsave(&clockfw_lock, flags);
        ret = omap2_clk_round_rate(clk, rate);
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return ret;
}
EXPORT_SYMBOL(clk_round_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
        unsigned long flags;
        int ret = -EINVAL;

        if (clk == NULL || IS_ERR(clk))
                return ret;

        spin_lock_irqsave(&clockfw_lock, flags);
        ret = omap2_clk_set_rate(clk, rate);
        if (ret == 0)
                propagate_rate(clk);
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return ret;
}
EXPORT_SYMBOL(clk_set_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
        unsigned long flags;
        int ret = -EINVAL;

        if (clk == NULL || IS_ERR(clk) || parent == NULL || IS_ERR(parent))
                return ret;

        spin_lock_irqsave(&clockfw_lock, flags);
        if (clk->usecount == 0) {
                ret = omap2_clk_set_parent(clk, parent);
                if (ret == 0)
                        propagate_rate(clk);
        } else {
                ret = -EBUSY;
        }
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return ret;
}
EXPORT_SYMBOL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
        return clk->parent;
}
EXPORT_SYMBOL(clk_get_parent);

/*
 * OMAP specific clock functions shared between omap1 and omap2
 */

int __initdata mpurate;

/*
 * By default we use the rate set by the bootloader.
 * You can override this with the mpurate= cmdline option.
 */
static int __init omap_clk_setup(char *str)
{
        get_option(&str, &mpurate);

        if (!mpurate)
                return 1;

        if (mpurate < 1000)
                mpurate *= 1000000;

        return 1;
}
__setup("mpurate=", omap_clk_setup);
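
/*
 * Examples (values are illustrative): "mpurate=600" on the kernel
 * command line falls under the "< 1000" check above and is scaled to
 * request 600 MHz, while "mpurate=600000000" is passed through
 * unchanged; omap2_clk_switch_mpurate_at_boot() then applies the
 * requested rate.
 */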

/* Used for clocks that always have the same value as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
        return clk->parent->rate;
}

/*
 * Used for clocks that have the same value as the parent clock,
 * divided by some factor
 */
unsigned long omap_fixed_divisor_recalc(struct clk *clk)
{
        WARN_ON(!clk->fixed_div);

        return clk->parent->rate / clk->fixed_div;
}

void clk_reparent(struct clk *child, struct clk *parent)
{
        list_del_init(&child->sibling);
        if (parent)
                list_add(&child->sibling, &parent->children);
        child->parent = parent;

        /*
         * now do the debugfs renaming to reattach the child
         * to the proper parent
         */
}

/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &tclk->children, sibling) {
                if (clkp->recalc)
                        clkp->rate = clkp->recalc(clkp);
                propagate_rate(clkp);
        }
}

static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent), which if the
 * clock's .recalc is set correctly, should also propagate their rates.
 * Called at init.
 */
void recalculate_root_clocks(void)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &root_clks, sibling) {
                if (clkp->recalc)
                        clkp->rate = clkp->recalc(clkp);
                propagate_rate(clkp);
        }
}

/**
 * clk_preinit - initialize any fields in the struct clk before clk init
 * @clk: struct clk * to initialize
 *
 * Initialize any struct clk fields needed before normal clk initialization
 * can run. No return value.
 */
void clk_preinit(struct clk *clk)
{
        INIT_LIST_HEAD(&clk->children);
}

int clk_register(struct clk *clk)
{
        if (clk == NULL || IS_ERR(clk))
                return -EINVAL;

        /*
         * trap out already registered clocks
         */
        if (clk->node.next || clk->node.prev)
                return 0;

        mutex_lock(&clocks_mutex);
        if (clk->parent)
                list_add(&clk->sibling, &clk->parent->children);
        else
                list_add(&clk->sibling, &root_clks);

        list_add(&clk->node, &clocks);
        if (clk->init)
                clk->init(clk);
        mutex_unlock(&clocks_mutex);

        return 0;
}
EXPORT_SYMBOL(clk_register);

void clk_unregister(struct clk *clk)
{
        if (clk == NULL || IS_ERR(clk))
                return;

        mutex_lock(&clocks_mutex);
        list_del(&clk->sibling);
        list_del(&clk->node);
        mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clk_unregister);

void clk_enable_init_clocks(void)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &clocks, node)
                if (clkp->flags & ENABLE_ON_INIT)
                        clk_enable(clkp);
}

/**
 * omap_clk_get_by_name - locate OMAP struct clk by its name
 * @name: name of the struct clk to locate
 *
 * Locate an OMAP struct clk by its name. Assumes that struct clk
 * names are unique. Returns NULL if not found or a pointer to the
 * struct clk if found.
 */
struct clk *omap_clk_get_by_name(const char *name)
{
        struct clk *c;
        struct clk *ret = NULL;

        mutex_lock(&clocks_mutex);

        list_for_each_entry(c, &clocks, node) {
                if (!strcmp(c->name, name)) {
                        ret = c;
                        break;
                }
        }

        mutex_unlock(&clocks_mutex);

        return ret;
}

int omap_clk_enable_autoidle_all(void)
{
        struct clk *c;
        unsigned long flags;

        spin_lock_irqsave(&clockfw_lock, flags);

        list_for_each_entry(c, &clocks, node)
                if (c->ops->allow_idle)
                        c->ops->allow_idle(c);

        spin_unlock_irqrestore(&clockfw_lock, flags);

        return 0;
}

int omap_clk_disable_autoidle_all(void)
{
        struct clk *c;
        unsigned long flags;

        spin_lock_irqsave(&clockfw_lock, flags);

        list_for_each_entry(c, &clocks, node)
                if (c->ops->deny_idle)
                        c->ops->deny_idle(c);

        spin_unlock_irqrestore(&clockfw_lock, flags);

        return 0;
}

/*
 * Low level helpers
 */
static int clkll_enable_null(struct clk *clk)
{
        return 0;
}

static void clkll_disable_null(struct clk *clk)
{
}

const struct clkops clkops_null = {
        .enable         = clkll_enable_null,
        .disable        = clkll_disable_null,
};

/*
 * Dummy clock
 *
 * Used for clock aliases that are needed on some OMAPs, but not others
 */
struct clk dummy_ck = {
        .name   = "dummy",
        .ops    = &clkops_null,
};
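
/*
 * Illustrative use (the device and connection-id names here are
 * hypothetical): an SoC's clock alias table can point an IP block at
 * dummy_ck when its clock is always running or is managed elsewhere,
 * e.g. a clkdev entry mapping ("omap_foo.0", "fck") to &dummy_ck, so
 * that driver clk_get()/clk_enable() calls succeed without touching
 * any enable register (clkops_null does nothing).
 */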

/*
 *
 */

#ifdef CONFIG_OMAP_RESET_CLOCKS
/*
 * Disable any unused clocks left on by the bootloader
 */
static int __init clk_disable_unused(void)
{
        struct clk *ck;
        unsigned long flags;

        pr_info("clock: disabling unused clocks to save power\n");

        spin_lock_irqsave(&clockfw_lock, flags);
        list_for_each_entry(ck, &clocks, node) {
                if (ck->ops == &clkops_null)
                        continue;

                if (ck->usecount > 0 || !ck->enable_reg)
                        continue;

                omap2_clk_disable_unused(ck);
        }
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return 0;
}
late_initcall(clk_disable_unused);
late_initcall(omap_clk_enable_autoidle_all);
#endif

#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
/*
 * debugfs support to trace clock tree hierarchy and attributes
 */

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *clk_debugfs_root;

static int clk_dbg_show_summary(struct seq_file *s, void *unused)
{
        struct clk *c;
        struct clk *pa;

        mutex_lock(&clocks_mutex);
        seq_printf(s, "%-30s %-30s %-10s %s\n",
                   "clock-name", "parent-name", "rate", "use-count");

        list_for_each_entry(c, &clocks, node) {
                pa = c->parent;
                seq_printf(s, "%-30s %-30s %-10lu %d\n",
                           c->name, pa ? pa->name : "none", c->rate,
                           c->usecount);
        }
        mutex_unlock(&clocks_mutex);

        return 0;
}

static int clk_dbg_open(struct inode *inode, struct file *file)
{
        return single_open(file, clk_dbg_show_summary, inode->i_private);
}

static const struct file_operations debug_clock_fops = {
        .open           = clk_dbg_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int clk_debugfs_register_one(struct clk *c)
{
        int err;
        struct dentry *d;
        struct clk *pa = c->parent;

        d = debugfs_create_dir(c->name, pa ? pa->dent : clk_debugfs_root);
        if (!d)
                return -ENOMEM;
        c->dent = d;

        d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount);
        if (!d) {
                err = -ENOMEM;
                goto err_out;
        }
        d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
        if (!d) {
                err = -ENOMEM;
                goto err_out;
        }
        d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
        if (!d) {
                err = -ENOMEM;
                goto err_out;
        }
        return 0;

err_out:
        debugfs_remove_recursive(c->dent);
        return err;
}

static int clk_debugfs_register(struct clk *c)
{
        int err;
        struct clk *pa = c->parent;

        if (pa && !pa->dent) {
                err = clk_debugfs_register(pa);
                if (err)
                        return err;
        }

        if (!c->dent) {
                err = clk_debugfs_register_one(c);
                if (err)
                        return err;
        }
        return 0;
}

static int __init clk_debugfs_init(void)
{
        struct clk *c;
        struct dentry *d;
        int err;

        d = debugfs_create_dir("clock", NULL);
        if (!d)
                return -ENOMEM;
        clk_debugfs_root = d;

        list_for_each_entry(c, &clocks, node) {
                err = clk_debugfs_register(c);
                if (err)
                        goto err_out;
        }

        d = debugfs_create_file("summary", S_IRUGO,
                                d, NULL, &debug_clock_fops);
        if (!d)
                return -ENOMEM;

        return 0;
err_out:
        debugfs_remove_recursive(clk_debugfs_root);
        return err;
}
late_initcall(clk_debugfs_init);
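
/*
 * The resulting debugfs layout (assuming debugfs is mounted at its
 * usual /sys/kernel/debug location) mirrors the clock tree: each clock
 * gets a directory under its parent's directory, e.g.
 *
 *	/sys/kernel/debug/clock/<root-clk>/<child-clk>/{usecount,rate,flags}
 *	/sys/kernel/debug/clock/summary
 *
 * where "summary" prints the one-line-per-clock table produced by
 * clk_dbg_show_summary() above.
 */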

#endif  /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */