drivers/clk/clk.c
1 /*
2 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
3 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * Standard functionality for the common clock API. See Documentation/clk.txt
10 */
11
12 #include <linux/clk-private.h>
13 #include <linux/clk/clk-conf.h>
14 #include <linux/module.h>
15 #include <linux/mutex.h>
16 #include <linux/spinlock.h>
17 #include <linux/err.h>
18 #include <linux/list.h>
19 #include <linux/slab.h>
20 #include <linux/of.h>
21 #include <linux/device.h>
22 #include <linux/init.h>
23 #include <linux/sched.h>
24
25 #include "clk.h"
26
27 static DEFINE_SPINLOCK(enable_lock);
28 static DEFINE_MUTEX(prepare_lock);
29
30 static struct task_struct *prepare_owner;
31 static struct task_struct *enable_owner;
32
33 static int prepare_refcnt;
34 static int enable_refcnt;
35
36 static HLIST_HEAD(clk_root_list);
37 static HLIST_HEAD(clk_orphan_list);
38 static LIST_HEAD(clk_notifier_list);
39
40 /*** locking ***/
41 static void clk_prepare_lock(void)
42 {
43 if (!mutex_trylock(&prepare_lock)) {
44 if (prepare_owner == current) {
45 prepare_refcnt++;
46 return;
47 }
48 mutex_lock(&prepare_lock);
49 }
50 WARN_ON_ONCE(prepare_owner != NULL);
51 WARN_ON_ONCE(prepare_refcnt != 0);
52 prepare_owner = current;
53 prepare_refcnt = 1;
54 }
55
56 static void clk_prepare_unlock(void)
57 {
58 WARN_ON_ONCE(prepare_owner != current);
59 WARN_ON_ONCE(prepare_refcnt == 0);
60
61 if (--prepare_refcnt)
62 return;
63 prepare_owner = NULL;
64 mutex_unlock(&prepare_lock);
65 }
66
67 static unsigned long clk_enable_lock(void)
68 {
69 unsigned long flags;
70
71 if (!spin_trylock_irqsave(&enable_lock, flags)) {
72 if (enable_owner == current) {
73 enable_refcnt++;
74 return flags;
75 }
76 spin_lock_irqsave(&enable_lock, flags);
77 }
78 WARN_ON_ONCE(enable_owner != NULL);
79 WARN_ON_ONCE(enable_refcnt != 0);
80 enable_owner = current;
81 enable_refcnt = 1;
82 return flags;
83 }
84
85 static void clk_enable_unlock(unsigned long flags)
86 {
87 WARN_ON_ONCE(enable_owner != current);
88 WARN_ON_ONCE(enable_refcnt == 0);
89
90 if (--enable_refcnt)
91 return;
92 enable_owner = NULL;
93 spin_unlock_irqrestore(&enable_lock, flags);
94 }
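/*
 * Editorial note: the two pairs of helpers above implement reentrant
 * locks, so framework code can safely re-acquire the same lock from
 * the same task (for example when a clk op calls back into the core).
 * A minimal sketch of the pattern, assuming one task throughout:
 *
 *	clk_prepare_lock();
 *	clk_prepare_lock();     -- owner matches: only prepare_refcnt++
 *	clk_prepare_unlock();   -- refcount drops to 1, mutex still held
 *	clk_prepare_unlock();   -- refcount hits 0, mutex released
 */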
95
96 /*** debugfs support ***/
97
98 #ifdef CONFIG_DEBUG_FS
99 #include <linux/debugfs.h>
100
101 static struct dentry *rootdir;
102 static int inited = 0;
103 static DEFINE_MUTEX(clk_debug_lock);
104 static HLIST_HEAD(clk_debug_list);
105
106 static struct hlist_head *all_lists[] = {
107 &clk_root_list,
108 &clk_orphan_list,
109 NULL,
110 };
111
112 static struct hlist_head *orphan_list[] = {
113 &clk_orphan_list,
114 NULL,
115 };
116
117 static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
118 {
119 if (!c)
120 return;
121
122 seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n",
123 level * 3 + 1, "",
124 30 - level * 3, c->name,
125 c->enable_count, c->prepare_count, clk_get_rate(c),
126 clk_get_accuracy(c), clk_get_phase(c));
127 }
128
129 static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
130 int level)
131 {
132 struct clk *child;
133
134 if (!c)
135 return;
136
137 clk_summary_show_one(s, c, level);
138
139 hlist_for_each_entry(child, &c->children, child_node)
140 clk_summary_show_subtree(s, child, level + 1);
141 }
142
143 static int clk_summary_show(struct seq_file *s, void *data)
144 {
145 struct clk *c;
146 struct hlist_head **lists = (struct hlist_head **)s->private;
147
148 seq_puts(s, " clock enable_cnt prepare_cnt rate accuracy phase\n");
149 seq_puts(s, "----------------------------------------------------------------------------------------\n");
150
151 clk_prepare_lock();
152
153 for (; *lists; lists++)
154 hlist_for_each_entry(c, *lists, child_node)
155 clk_summary_show_subtree(s, c, 0);
156
157 clk_prepare_unlock();
158
159 return 0;
160 }
161
162
163 static int clk_summary_open(struct inode *inode, struct file *file)
164 {
165 return single_open(file, clk_summary_show, inode->i_private);
166 }
167
168 static const struct file_operations clk_summary_fops = {
169 .open = clk_summary_open,
170 .read = seq_read,
171 .llseek = seq_lseek,
172 .release = single_release,
173 };
174
175 static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
176 {
177 if (!c)
178 return;
179
180 seq_printf(s, "\"%s\": { ", c->name);
181 seq_printf(s, "\"enable_count\": %d,", c->enable_count);
182 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
183 seq_printf(s, "\"rate\": %lu,", clk_get_rate(c));
184 seq_printf(s, "\"accuracy\": %lu,", clk_get_accuracy(c));
185 seq_printf(s, "\"phase\": %d", clk_get_phase(c));
186 }
187
188 static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
189 {
190 struct clk *child;
191
192 if (!c)
193 return;
194
195 clk_dump_one(s, c, level);
196
197 hlist_for_each_entry(child, &c->children, child_node) {
198 seq_printf(s, ",");
199 clk_dump_subtree(s, child, level + 1);
200 }
201
202 seq_printf(s, "}");
203 }
204
205 static int clk_dump(struct seq_file *s, void *data)
206 {
207 struct clk *c;
208 bool first_node = true;
209 struct hlist_head **lists = (struct hlist_head **)s->private;
210
211 seq_printf(s, "{");
212
213 clk_prepare_lock();
214
215 for (; *lists; lists++) {
216 hlist_for_each_entry(c, *lists, child_node) {
217 if (!first_node)
218 seq_puts(s, ",");
219 first_node = false;
220 clk_dump_subtree(s, c, 0);
221 }
222 }
223
224 clk_prepare_unlock();
225
226 seq_printf(s, "}");
227 return 0;
228 }
229
230
231 static int clk_dump_open(struct inode *inode, struct file *file)
232 {
233 return single_open(file, clk_dump, inode->i_private);
234 }
235
236 static const struct file_operations clk_dump_fops = {
237 .open = clk_dump_open,
238 .read = seq_read,
239 .llseek = seq_lseek,
240 .release = single_release,
241 };
242
243 /* caller must hold prepare_lock */
244 static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
245 {
246 struct dentry *d;
247 int ret = -ENOMEM;
248
249 if (!clk || !pdentry) {
250 ret = -EINVAL;
251 goto out;
252 }
253
254 d = debugfs_create_dir(clk->name, pdentry);
255 if (!d)
256 goto out;
257
258 clk->dentry = d;
259
260 d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry,
261 (u32 *)&clk->rate);
262 if (!d)
263 goto err_out;
264
265 d = debugfs_create_u32("clk_accuracy", S_IRUGO, clk->dentry,
266 (u32 *)&clk->accuracy);
267 if (!d)
268 goto err_out;
269
270 d = debugfs_create_u32("clk_phase", S_IRUGO, clk->dentry,
271 (u32 *)&clk->phase);
272 if (!d)
273 goto err_out;
274
275 d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
276 (u32 *)&clk->flags);
277 if (!d)
278 goto err_out;
279
280 d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry,
281 (u32 *)&clk->prepare_count);
282 if (!d)
283 goto err_out;
284
285 d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry,
286 (u32 *)&clk->enable_count);
287 if (!d)
288 goto err_out;
289
290 d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry,
291 (u32 *)&clk->notifier_count);
292 if (!d)
293 goto err_out;
294
295 if (clk->ops->debug_init) {
296 ret = clk->ops->debug_init(clk->hw, clk->dentry);
297 if (ret)
298 goto err_out;
299 }
300
301 ret = 0;
302 goto out;
303
304 err_out:
305 debugfs_remove_recursive(clk->dentry);
306 clk->dentry = NULL;
307 out:
308 return ret;
309 }
310
311 /**
312 * clk_debug_register - add a clk node to the debugfs clk tree
313 * @clk: the clk being added to the debugfs clk tree
314 *
315 * Dynamically adds a clk to the debugfs clk tree if debugfs has been
316 * initialized. Otherwise it bails out early since the debugfs clk tree
317 * will be created lazily by clk_debug_init as part of a late_initcall.
318 */
319 static int clk_debug_register(struct clk *clk)
320 {
321 int ret = 0;
322
323 mutex_lock(&clk_debug_lock);
324 hlist_add_head(&clk->debug_node, &clk_debug_list);
325
326 if (!inited)
327 goto unlock;
328
329 ret = clk_debug_create_one(clk, rootdir);
330 unlock:
331 mutex_unlock(&clk_debug_lock);
332
333 return ret;
334 }
335
336 /**
337 * clk_debug_unregister - remove a clk node from the debugfs clk tree
338 * @clk: the clk being removed from the debugfs clk tree
339 *
340 * Dynamically removes a clk and all its child clk nodes from the
341 * debugfs clk tree if clk->dentry points to debugfs created by
342 * clk_debug_register in __clk_init.
343 */
344 static void clk_debug_unregister(struct clk *clk)
345 {
346 mutex_lock(&clk_debug_lock);
347 if (!clk->dentry)
348 goto out;
349
350 hlist_del_init(&clk->debug_node);
351 debugfs_remove_recursive(clk->dentry);
352 clk->dentry = NULL;
353 out:
354 mutex_unlock(&clk_debug_lock);
355 }
356
357 struct dentry *clk_debugfs_add_file(struct clk *clk, char *name, umode_t mode,
358 void *data, const struct file_operations *fops)
359 {
360 struct dentry *d = NULL;
361
362 if (clk->dentry)
363 d = debugfs_create_file(name, mode, clk->dentry, data, fops);
364
365 return d;
366 }
367 EXPORT_SYMBOL_GPL(clk_debugfs_add_file);
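/*
 * Editorial example (hypothetical provider code, not part of this
 * file): attaching an extra driver-specific debugfs file to a
 * registered clk with the helper above. The "foo" names are
 * assumptions for illustration.
 *
 *	struct dentry *d;
 *
 *	d = clk_debugfs_add_file(foo->clk, "foo_state", S_IRUGO,
 *				 foo, &foo_state_fops);
 *	if (!d)
 *		pr_debug("foo: clk debugfs file not created\n");
 */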
368
369 /**
370 * clk_debug_init - lazily create the debugfs clk tree visualization
371 *
372 * clks are often initialized very early during boot before memory can
373 * be dynamically allocated and well before debugfs is setup.
374 * clk_debug_init walks the clk tree hierarchy while holding
375 * prepare_lock and creates the topology as part of a late_initcall,
376 * thus ensuring that clks initialized very early will still be
377 * represented in the debugfs clk tree. This function should only be
378 * called once at boot-time; all clks added dynamically afterwards are
379 * registered via clk_debug_register.
380 */
381 static int __init clk_debug_init(void)
382 {
383 struct clk *clk;
384 struct dentry *d;
385
386 rootdir = debugfs_create_dir("clk", NULL);
387
388 if (!rootdir)
389 return -ENOMEM;
390
391 d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, &all_lists,
392 &clk_summary_fops);
393 if (!d)
394 return -ENOMEM;
395
396 d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, &all_lists,
397 &clk_dump_fops);
398 if (!d)
399 return -ENOMEM;
400
401 d = debugfs_create_file("clk_orphan_summary", S_IRUGO, rootdir,
402 &orphan_list, &clk_summary_fops);
403 if (!d)
404 return -ENOMEM;
405
406 d = debugfs_create_file("clk_orphan_dump", S_IRUGO, rootdir,
407 &orphan_list, &clk_dump_fops);
408 if (!d)
409 return -ENOMEM;
410
411 mutex_lock(&clk_debug_lock);
412 hlist_for_each_entry(clk, &clk_debug_list, debug_node)
413 clk_debug_create_one(clk, rootdir);
414
415 inited = 1;
416 mutex_unlock(&clk_debug_lock);
417
418 return 0;
419 }
420 late_initcall(clk_debug_init);
421 #else
422 static inline int clk_debug_register(struct clk *clk) { return 0; }
423 static inline void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
424 {
425 }
426 static inline void clk_debug_unregister(struct clk *clk)
427 {
428 }
429 #endif
430
431 /* caller must hold prepare_lock */
432 static void clk_unprepare_unused_subtree(struct clk *clk)
433 {
434 struct clk *child;
435
436 if (!clk)
437 return;
438
439 hlist_for_each_entry(child, &clk->children, child_node)
440 clk_unprepare_unused_subtree(child);
441
442 if (clk->prepare_count)
443 return;
444
445 if (clk->flags & CLK_IGNORE_UNUSED)
446 return;
447
448 if (__clk_is_prepared(clk)) {
449 if (clk->ops->unprepare_unused)
450 clk->ops->unprepare_unused(clk->hw);
451 else if (clk->ops->unprepare)
452 clk->ops->unprepare(clk->hw);
453 }
454 }
455
456 /* caller must hold prepare_lock */
457 static void clk_disable_unused_subtree(struct clk *clk)
458 {
459 struct clk *child;
460 unsigned long flags;
461
462 if (!clk)
463 goto out;
464
465 hlist_for_each_entry(child, &clk->children, child_node)
466 clk_disable_unused_subtree(child);
467
468 flags = clk_enable_lock();
469
470 if (clk->enable_count)
471 goto unlock_out;
472
473 if (clk->flags & CLK_IGNORE_UNUSED)
474 goto unlock_out;
475
476 /*
477 * some gate clocks have special needs during the disable-unused
478 * sequence. call .disable_unused if available, otherwise fall
479 * back to .disable
480 */
481 if (__clk_is_enabled(clk)) {
482 if (clk->ops->disable_unused)
483 clk->ops->disable_unused(clk->hw);
484 else if (clk->ops->disable)
485 clk->ops->disable(clk->hw);
486 }
487
488 unlock_out:
489 clk_enable_unlock(flags);
490
491 out:
492 return;
493 }
494
495 static bool clk_ignore_unused;
496 static int __init clk_ignore_unused_setup(char *__unused)
497 {
498 clk_ignore_unused = true;
499 return 1;
500 }
501 __setup("clk_ignore_unused", clk_ignore_unused_setup);
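/*
 * Editorial note: booting with "clk_ignore_unused" on the kernel
 * command line fires the __setup handler above, so the late_initcall
 * below skips gating and unpreparing unused clocks. For example:
 *
 *	console=ttyS0,115200 root=/dev/mmcblk0p2 clk_ignore_unused
 */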
502
503 static int clk_disable_unused(void)
504 {
505 struct clk *clk;
506
507 if (clk_ignore_unused) {
508 pr_warn("clk: Not disabling unused clocks\n");
509 return 0;
510 }
511
512 clk_prepare_lock();
513
514 hlist_for_each_entry(clk, &clk_root_list, child_node)
515 clk_disable_unused_subtree(clk);
516
517 hlist_for_each_entry(clk, &clk_orphan_list, child_node)
518 clk_disable_unused_subtree(clk);
519
520 hlist_for_each_entry(clk, &clk_root_list, child_node)
521 clk_unprepare_unused_subtree(clk);
522
523 hlist_for_each_entry(clk, &clk_orphan_list, child_node)
524 clk_unprepare_unused_subtree(clk);
525
526 clk_prepare_unlock();
527
528 return 0;
529 }
530 late_initcall_sync(clk_disable_unused);
531
532 /*** helper functions ***/
533
534 const char *__clk_get_name(struct clk *clk)
535 {
536 return !clk ? NULL : clk->name;
537 }
538 EXPORT_SYMBOL_GPL(__clk_get_name);
539
540 struct clk_hw *__clk_get_hw(struct clk *clk)
541 {
542 return !clk ? NULL : clk->hw;
543 }
544 EXPORT_SYMBOL_GPL(__clk_get_hw);
545
546 u8 __clk_get_num_parents(struct clk *clk)
547 {
548 return !clk ? 0 : clk->num_parents;
549 }
550 EXPORT_SYMBOL_GPL(__clk_get_num_parents);
551
552 struct clk *__clk_get_parent(struct clk *clk)
553 {
554 return !clk ? NULL : clk->parent;
555 }
556 EXPORT_SYMBOL_GPL(__clk_get_parent);
557
558 struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
559 {
560 if (!clk || index >= clk->num_parents)
561 return NULL;
562 else if (!clk->parents)
563 return __clk_lookup(clk->parent_names[index]);
564 else if (!clk->parents[index])
565 return clk->parents[index] =
566 __clk_lookup(clk->parent_names[index]);
567 else
568 return clk->parents[index];
569 }
570 EXPORT_SYMBOL_GPL(clk_get_parent_by_index);
571
572 unsigned int __clk_get_enable_count(struct clk *clk)
573 {
574 return !clk ? 0 : clk->enable_count;
575 }
576
577 unsigned int __clk_get_prepare_count(struct clk *clk)
578 {
579 return !clk ? 0 : clk->prepare_count;
580 }
581
582 unsigned long __clk_get_rate(struct clk *clk)
583 {
584 unsigned long ret;
585
586 if (!clk) {
587 ret = 0;
588 goto out;
589 }
590
591 ret = clk->rate;
592
593 if (clk->flags & CLK_IS_ROOT)
594 goto out;
595
596 if (!clk->parent)
597 ret = 0;
598
599 out:
600 return ret;
601 }
602 EXPORT_SYMBOL_GPL(__clk_get_rate);
603
604 unsigned long __clk_get_accuracy(struct clk *clk)
605 {
606 if (!clk)
607 return 0;
608
609 return clk->accuracy;
610 }
611
612 unsigned long __clk_get_flags(struct clk *clk)
613 {
614 return !clk ? 0 : clk->flags;
615 }
616 EXPORT_SYMBOL_GPL(__clk_get_flags);
617
618 bool __clk_is_prepared(struct clk *clk)
619 {
620 int ret;
621
622 if (!clk)
623 return false;
624
625 /*
626 * .is_prepared is optional for clocks that can prepare;
627 * fall back to the software usage counter if it is missing
628 */
629 if (!clk->ops->is_prepared) {
630 ret = clk->prepare_count ? 1 : 0;
631 goto out;
632 }
633
634 ret = clk->ops->is_prepared(clk->hw);
635 out:
636 return !!ret;
637 }
638
639 bool __clk_is_enabled(struct clk *clk)
640 {
641 int ret;
642
643 if (!clk)
644 return false;
645
646 /*
647 * .is_enabled is mandatory only for clocks that gate;
648 * fall back to the software usage counter if .is_enabled is missing
649 */
650 if (!clk->ops->is_enabled) {
651 ret = clk->enable_count ? 1 : 0;
652 goto out;
653 }
654
655 ret = clk->ops->is_enabled(clk->hw);
656 out:
657 return !!ret;
658 }
659 EXPORT_SYMBOL_GPL(__clk_is_enabled);
660
661 static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
662 {
663 struct clk *child;
664 struct clk *ret;
665
666 if (!strcmp(clk->name, name))
667 return clk;
668
669 hlist_for_each_entry(child, &clk->children, child_node) {
670 ret = __clk_lookup_subtree(name, child);
671 if (ret)
672 return ret;
673 }
674
675 return NULL;
676 }
677
678 struct clk *__clk_lookup(const char *name)
679 {
680 struct clk *root_clk;
681 struct clk *ret;
682
683 if (!name)
684 return NULL;
685
686 /* search the 'proper' clk tree first */
687 hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
688 ret = __clk_lookup_subtree(name, root_clk);
689 if (ret)
690 return ret;
691 }
692
693 /* if not found, then search the orphan tree */
694 hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
695 ret = __clk_lookup_subtree(name, root_clk);
696 if (ret)
697 return ret;
698 }
699
700 return NULL;
701 }
702
703 /*
704 * Helper for finding best parent to provide a given frequency. This can be used
705 * directly as a determine_rate callback (e.g. for a mux), or from a more
706 * complex clock that may combine a mux with other operations.
707 */
708 long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
709 unsigned long *best_parent_rate,
710 struct clk **best_parent_p)
711 {
712 struct clk *clk = hw->clk, *parent, *best_parent = NULL;
713 int i, num_parents;
714 unsigned long parent_rate, best = 0;
715
716 /* if NO_REPARENT flag set, pass through to current parent */
717 if (clk->flags & CLK_SET_RATE_NO_REPARENT) {
718 parent = clk->parent;
719 if (clk->flags & CLK_SET_RATE_PARENT)
720 best = __clk_round_rate(parent, rate);
721 else if (parent)
722 best = __clk_get_rate(parent);
723 else
724 best = __clk_get_rate(clk);
725 goto out;
726 }
727
728 /* find the parent that can provide the fastest rate <= rate */
729 num_parents = clk->num_parents;
730 for (i = 0; i < num_parents; i++) {
731 parent = clk_get_parent_by_index(clk, i);
732 if (!parent)
733 continue;
734 if (clk->flags & CLK_SET_RATE_PARENT)
735 parent_rate = __clk_round_rate(parent, rate);
736 else
737 parent_rate = __clk_get_rate(parent);
738 if (parent_rate <= rate && parent_rate > best) {
739 best_parent = parent;
740 best = parent_rate;
741 }
742 }
743
744 out:
745 if (best_parent)
746 *best_parent_p = best_parent;
747 *best_parent_rate = best;
748
749 return best;
750 }
751 EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
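/*
 * Editorial example (hypothetical provider code, not part of this
 * file): wiring the helper above directly into a mux's clk_ops, as
 * the comment preceding it suggests. The "foo" callbacks are
 * assumptions for illustration.
 *
 *	static const struct clk_ops foo_mux_ops = {
 *		.get_parent     = foo_mux_get_parent,
 *		.set_parent     = foo_mux_set_parent,
 *		.determine_rate = __clk_mux_determine_rate,
 *	};
 */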
752
753 /*** clk api ***/
754
755 void __clk_unprepare(struct clk *clk)
756 {
757 if (!clk)
758 return;
759
760 if (WARN_ON(clk->prepare_count == 0))
761 return;
762
763 if (--clk->prepare_count > 0)
764 return;
765
766 WARN_ON(clk->enable_count > 0);
767
768 if (clk->ops->unprepare)
769 clk->ops->unprepare(clk->hw);
770
771 __clk_unprepare(clk->parent);
772 }
773
774 /**
775 * clk_unprepare - undo preparation of a clock source
776 * @clk: the clk being unprepared
777 *
778 * clk_unprepare may sleep, which differentiates it from clk_disable. In a
779 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
780 * if the operation may sleep. One example is a clk which is accessed over
781 * I2C. In the complex case a clk gate operation may require a fast and a slow
782 * part. For this reason clk_unprepare and clk_disable are not mutually
783 * exclusive. In fact clk_disable must be called before clk_unprepare.
784 */
785 void clk_unprepare(struct clk *clk)
786 {
787 if (IS_ERR_OR_NULL(clk))
788 return;
789
790 clk_prepare_lock();
791 __clk_unprepare(clk);
792 clk_prepare_unlock();
793 }
794 EXPORT_SYMBOL_GPL(clk_unprepare);
795
796 int __clk_prepare(struct clk *clk)
797 {
798 int ret = 0;
799
800 if (!clk)
801 return 0;
802
803 if (clk->prepare_count == 0) {
804 ret = __clk_prepare(clk->parent);
805 if (ret)
806 return ret;
807
808 if (clk->ops->prepare) {
809 ret = clk->ops->prepare(clk->hw);
810 if (ret) {
811 __clk_unprepare(clk->parent);
812 return ret;
813 }
814 }
815 }
816
817 clk->prepare_count++;
818
819 return 0;
820 }
821
822 /**
823 * clk_prepare - prepare a clock source
824 * @clk: the clk being prepared
825 *
826 * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
827 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
828 * operation may sleep. One example is a clk which is accessed over I2C. In
829 * the complex case a clk ungate operation may require a fast and a slow part.
830 * For this reason clk_prepare and clk_enable are not mutually
831 * exclusive. In fact clk_prepare must be called before clk_enable.
832 * Returns 0 on success, or a negative error code otherwise.
833 */
834 int clk_prepare(struct clk *clk)
835 {
836 int ret;
837
838 clk_prepare_lock();
839 ret = __clk_prepare(clk);
840 clk_prepare_unlock();
841
842 return ret;
843 }
844 EXPORT_SYMBOL_GPL(clk_prepare);
845
846 static void __clk_disable(struct clk *clk)
847 {
848 if (!clk)
849 return;
850
851 if (WARN_ON(clk->enable_count == 0))
852 return;
853
854 if (--clk->enable_count > 0)
855 return;
856
857 if (clk->ops->disable)
858 clk->ops->disable(clk->hw);
859
860 __clk_disable(clk->parent);
861 }
862
863 /**
864 * clk_disable - gate a clock
865 * @clk: the clk being gated
866 *
867 * clk_disable must not sleep, which differentiates it from clk_unprepare. In
868 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
869 * clk if the operation is fast and will never sleep. One example is a
870 * SoC-internal clk which is controlled via simple register writes. In the
871 * complex case a clk gate operation may require a fast and a slow part. It is
872 * this reason that clk_unprepare and clk_disable are not mutually exclusive.
873 * In fact clk_disable must be called before clk_unprepare.
874 */
875 void clk_disable(struct clk *clk)
876 {
877 unsigned long flags;
878
879 if (IS_ERR_OR_NULL(clk))
880 return;
881
882 flags = clk_enable_lock();
883 __clk_disable(clk);
884 clk_enable_unlock(flags);
885 }
886 EXPORT_SYMBOL_GPL(clk_disable);
887
888 static int __clk_enable(struct clk *clk)
889 {
890 int ret = 0;
891
892 if (!clk)
893 return 0;
894
895 if (WARN_ON(clk->prepare_count == 0))
896 return -ESHUTDOWN;
897
898 if (clk->enable_count == 0) {
899 ret = __clk_enable(clk->parent);
900
901 if (ret)
902 return ret;
903
904 if (clk->ops->enable) {
905 ret = clk->ops->enable(clk->hw);
906 if (ret) {
907 __clk_disable(clk->parent);
908 return ret;
909 }
910 }
911 }
912
913 clk->enable_count++;
914 return 0;
915 }
916
917 /**
918 * clk_enable - ungate a clock
919 * @clk: the clk being ungated
920 *
921 * clk_enable must not sleep, which differentiates it from clk_prepare. In a
922 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
923 * if the operation will never sleep. One example is a SoC-internal clk which
924 * is controlled via simple register writes. In the complex case a clk ungate
925 * operation may require a fast and a slow part. For this reason
926 * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
927 * must be called before clk_enable. Returns 0 on success, or a negative
928 * error code otherwise.
929 */
930 int clk_enable(struct clk *clk)
931 {
932 unsigned long flags;
933 int ret;
934
935 flags = clk_enable_lock();
936 ret = __clk_enable(clk);
937 clk_enable_unlock(flags);
938
939 return ret;
940 }
941 EXPORT_SYMBOL_GPL(clk_enable);
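/*
 * Editorial example (hypothetical consumer code, not part of this
 * file): the prepare/enable split in practice. clk_prepare() may
 * sleep and must come first; clk_enable() may then be called from
 * atomic context. The "foo" names are assumptions for illustration.
 *
 *	int foo_power_on(struct foo_dev *foo)
 *	{
 *		int ret;
 *
 *		ret = clk_prepare(foo->clk);    -- may sleep
 *		if (ret)
 *			return ret;
 *
 *		ret = clk_enable(foo->clk);     -- must not sleep
 *		if (ret)
 *			clk_unprepare(foo->clk);
 *
 *		return ret;
 *	}
 */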
942
943 /**
944 * __clk_round_rate - round the given rate for a clk
945 * @clk: round the rate of this clock
946 * @rate: the rate which is to be rounded
947 *
948 * Caller must hold prepare_lock. Useful for clk_ops such as .set_rate
949 */
950 unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
951 {
952 unsigned long parent_rate = 0;
953 struct clk *parent;
954
955 if (!clk)
956 return 0;
957
958 parent = clk->parent;
959 if (parent)
960 parent_rate = parent->rate;
961
962 if (clk->ops->determine_rate)
963 return clk->ops->determine_rate(clk->hw, rate, &parent_rate,
964 &parent);
965 else if (clk->ops->round_rate)
966 return clk->ops->round_rate(clk->hw, rate, &parent_rate);
967 else if (clk->flags & CLK_SET_RATE_PARENT)
968 return __clk_round_rate(clk->parent, rate);
969 else
970 return clk->rate;
971 }
972 EXPORT_SYMBOL_GPL(__clk_round_rate);
973
974 /**
975 * clk_round_rate - round the given rate for a clk
976 * @clk: the clk for which we are rounding a rate
977 * @rate: the rate which is to be rounded
978 *
979 * Takes in a rate as input and rounds it to a rate that the clk can actually
980 * use which is then returned. If clk supports neither .round_rate nor
981 * .determine_rate then the clk's current rate is returned.
982 */
983 long clk_round_rate(struct clk *clk, unsigned long rate)
984 {
985 unsigned long ret;
986
987 clk_prepare_lock();
988 ret = __clk_round_rate(clk, rate);
989 clk_prepare_unlock();
990
991 return ret;
992 }
993 EXPORT_SYMBOL_GPL(clk_round_rate);
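/*
 * Editorial example (hypothetical consumer code, not part of this
 * file): probing an achievable rate before committing to it. "foo"
 * and the target rate are assumptions for illustration.
 *
 *	long rounded = clk_round_rate(foo->clk, 48000000);
 *
 *	if (rounded > 0 && rounded != clk_get_rate(foo->clk))
 *		ret = clk_set_rate(foo->clk, rounded);
 */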
994
995 /**
996 * __clk_notify - call clk notifier chain
997 * @clk: struct clk * that is changing rate
998 * @msg: clk notifier type (see include/linux/clk.h)
999 * @old_rate: old clk rate
1000 * @new_rate: new clk rate
1001 *
1002 * Triggers a notifier call chain on the clk rate-change notification
1003 * for 'clk'. Passes a pointer to the struct clk and the previous
1004 * and current rates to the notifier callback. Intended to be called by
1005 * internal clock code only. Returns NOTIFY_DONE from the last driver
1006 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
1007 * a driver returns that.
1008 */
1009 static int __clk_notify(struct clk *clk, unsigned long msg,
1010 unsigned long old_rate, unsigned long new_rate)
1011 {
1012 struct clk_notifier *cn;
1013 struct clk_notifier_data cnd;
1014 int ret = NOTIFY_DONE;
1015
1016 cnd.clk = clk;
1017 cnd.old_rate = old_rate;
1018 cnd.new_rate = new_rate;
1019
1020 list_for_each_entry(cn, &clk_notifier_list, node) {
1021 if (cn->clk == clk) {
1022 ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
1023 &cnd);
1024 break;
1025 }
1026 }
1027
1028 return ret;
1029 }
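/*
 * Editorial example (hypothetical consumer code, not part of this
 * file): a rate-change notifier callback of the kind invoked by
 * __clk_notify() above, registered via clk_notifier_register() from
 * the clk consumer API. The "foo" names are assumptions.
 *
 *	static int foo_clk_notify(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		if (event == POST_RATE_CHANGE)
 *			foo_reprogram_dividers(ndata->new_rate);
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	...
 *	clk_notifier_register(foo->clk, &foo->clk_nb);
 */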
1030
1031 /**
1032 * __clk_recalc_accuracies
1033 * @clk: first clk in the subtree
1034 *
1035 * Walks the subtree of clks starting with clk and recalculates accuracies as
1036 * it goes. Note that if a clk does not implement the .recalc_accuracy
1037 * callback then it is assumed that the clock will take on the accuracy of its
1038 * parent.
1039 *
1040 * Caller must hold prepare_lock.
1041 */
1042 static void __clk_recalc_accuracies(struct clk *clk)
1043 {
1044 unsigned long parent_accuracy = 0;
1045 struct clk *child;
1046
1047 if (clk->parent)
1048 parent_accuracy = clk->parent->accuracy;
1049
1050 if (clk->ops->recalc_accuracy)
1051 clk->accuracy = clk->ops->recalc_accuracy(clk->hw,
1052 parent_accuracy);
1053 else
1054 clk->accuracy = parent_accuracy;
1055
1056 hlist_for_each_entry(child, &clk->children, child_node)
1057 __clk_recalc_accuracies(child);
1058 }
1059
1060 /**
1061 * clk_get_accuracy - return the accuracy of clk
1062 * @clk: the clk whose accuracy is being returned
1063 *
1064 * Simply returns the cached accuracy of the clk, unless
1065 * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_accuracy will be
1066 * issued.
1067 * If clk is NULL then returns 0.
1068 */
1069 long clk_get_accuracy(struct clk *clk)
1070 {
1071 unsigned long accuracy;
1072
1073 clk_prepare_lock();
1074 if (clk && (clk->flags & CLK_GET_ACCURACY_NOCACHE))
1075 __clk_recalc_accuracies(clk);
1076
1077 accuracy = __clk_get_accuracy(clk);
1078 clk_prepare_unlock();
1079
1080 return accuracy;
1081 }
1082 EXPORT_SYMBOL_GPL(clk_get_accuracy);
1083
1084 static unsigned long clk_recalc(struct clk *clk, unsigned long parent_rate)
1085 {
1086 if (clk->ops->recalc_rate)
1087 return clk->ops->recalc_rate(clk->hw, parent_rate);
1088 return parent_rate;
1089 }
1090
1091 /**
1092 * __clk_recalc_rates
1093 * @clk: first clk in the subtree
1094 * @msg: notification type (see include/linux/clk.h)
1095 *
1096 * Walks the subtree of clks starting with clk and recalculates rates as it
1097 * goes. Note that if a clk does not implement the .recalc_rate callback then
1098 * it is assumed that the clock will take on the rate of its parent.
1099 *
1100 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
1101 * if necessary.
1102 *
1103 * Caller must hold prepare_lock.
1104 */
1105 static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
1106 {
1107 unsigned long old_rate;
1108 unsigned long parent_rate = 0;
1109 struct clk *child;
1110
1111 old_rate = clk->rate;
1112
1113 if (clk->parent)
1114 parent_rate = clk->parent->rate;
1115
1116 clk->rate = clk_recalc(clk, parent_rate);
1117
1118 /*
1119 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
1120 * & ABORT_RATE_CHANGE notifiers
1121 */
1122 if (clk->notifier_count && msg)
1123 __clk_notify(clk, msg, old_rate, clk->rate);
1124
1125 hlist_for_each_entry(child, &clk->children, child_node)
1126 __clk_recalc_rates(child, msg);
1127 }
1128
1129 /**
1130 * clk_get_rate - return the rate of clk
1131 * @clk: the clk whose rate is being returned
1132 *
1133 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
1134 * is set, which means a recalc_rate will be issued.
1135 * If clk is NULL then returns 0.
1136 */
1137 unsigned long clk_get_rate(struct clk *clk)
1138 {
1139 unsigned long rate;
1140
1141 clk_prepare_lock();
1142
1143 if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
1144 __clk_recalc_rates(clk, 0);
1145
1146 rate = __clk_get_rate(clk);
1147 clk_prepare_unlock();
1148
1149 return rate;
1150 }
1151 EXPORT_SYMBOL_GPL(clk_get_rate);
1152
1153 static int clk_fetch_parent_index(struct clk *clk, struct clk *parent)
1154 {
1155 int i;
1156
1157 if (!clk->parents) {
1158 clk->parents = kcalloc(clk->num_parents,
1159 sizeof(struct clk *), GFP_KERNEL);
1160 if (!clk->parents)
1161 return -ENOMEM;
1162 }
1163
1164 /*
1165 * find index of new parent clock using cached parent ptrs,
1166 * or if not yet cached, use string name comparison and cache
1167 * them now to avoid future calls to __clk_lookup.
1168 */
1169 for (i = 0; i < clk->num_parents; i++) {
1170 if (clk->parents[i] == parent)
1171 return i;
1172
1173 if (clk->parents[i])
1174 continue;
1175
1176 if (!strcmp(clk->parent_names[i], parent->name)) {
1177 clk->parents[i] = __clk_lookup(parent->name);
1178 return i;
1179 }
1180 }
1181
1182 return -EINVAL;
1183 }
1184
1185 static void clk_reparent(struct clk *clk, struct clk *new_parent)
1186 {
1187 hlist_del(&clk->child_node);
1188
1189 if (new_parent) {
1190 /* avoid duplicate POST_RATE_CHANGE notifications */
1191 if (new_parent->new_child == clk)
1192 new_parent->new_child = NULL;
1193
1194 hlist_add_head(&clk->child_node, &new_parent->children);
1195 } else {
1196 hlist_add_head(&clk->child_node, &clk_orphan_list);
1197 }
1198
1199 clk->parent = new_parent;
1200 }
1201
1202 static struct clk *__clk_set_parent_before(struct clk *clk, struct clk *parent)
1203 {
1204 unsigned long flags;
1205 struct clk *old_parent = clk->parent;
1206
1207 /*
1208 * Migrate prepare state between parents and prevent race with
1209 * clk_enable().
1210 *
1211 * If the clock is not prepared, then a race with
1212 * clk_enable/disable() is impossible since we already have the
1213 * prepare lock (future calls to clk_enable() need to be preceded by
1214 * a clk_prepare()).
1215 *
1216 * If the clock is prepared, migrate the prepared state to the new
1217 * parent and also protect against a race with clk_enable() by
1218 * forcing the clock and the new parent on. This ensures that all
1219 * future calls to clk_enable() are practically NOPs with respect to
1220 * hardware and software states.
1221 *
1222 * See also: Comment for clk_set_parent() below.
1223 */
1224 if (clk->prepare_count) {
1225 __clk_prepare(parent);
1226 clk_enable(parent);
1227 clk_enable(clk);
1228 }
1229
1230 /* update the clk tree topology */
1231 flags = clk_enable_lock();
1232 clk_reparent(clk, parent);
1233 clk_enable_unlock(flags);
1234
1235 return old_parent;
1236 }
1237
1238 static void __clk_set_parent_after(struct clk *clk, struct clk *parent,
1239 struct clk *old_parent)
1240 {
1241 /*
1242 * Finish the migration of prepare state and undo the changes done
1243 * for preventing a race with clk_enable().
1244 */
1245 if (clk->prepare_count) {
1246 clk_disable(clk);
1247 clk_disable(old_parent);
1248 __clk_unprepare(old_parent);
1249 }
1250 }
1251
1252 static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
1253 {
1254 unsigned long flags;
1255 int ret = 0;
1256 struct clk *old_parent;
1257
1258 old_parent = __clk_set_parent_before(clk, parent);
1259
1260 /* change clock input source */
1261 if (parent && clk->ops->set_parent)
1262 ret = clk->ops->set_parent(clk->hw, p_index);
1263
1264 if (ret) {
1265 flags = clk_enable_lock();
1266 clk_reparent(clk, old_parent);
1267 clk_enable_unlock(flags);
1268
1269 if (clk->prepare_count) {
1270 clk_disable(clk);
1271 clk_disable(parent);
1272 __clk_unprepare(parent);
1273 }
1274 return ret;
1275 }
1276
1277 __clk_set_parent_after(clk, parent, old_parent);
1278
1279 return 0;
1280 }
1281
1282 /**
1283 * __clk_speculate_rates
1284 * @clk: first clk in the subtree
1285 * @parent_rate: the "future" rate of clk's parent
1286 *
1287 * Walks the subtree of clks starting with clk, speculating rates as it
1288 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
1289 *
1290 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
1291 * pre-rate change notifications and returns early if no clks in the
1292 * subtree have subscribed to the notifications. Note that if a clk does not
1293 * implement the .recalc_rate callback then it is assumed that the clock will
1294 * take on the rate of its parent.
1295 *
1296 * Caller must hold prepare_lock.
1297 */
1298 static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
1299 {
1300 struct clk *child;
1301 unsigned long new_rate;
1302 int ret = NOTIFY_DONE;
1303
1304 new_rate = clk_recalc(clk, parent_rate);
1305
1306 /* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
1307 if (clk->notifier_count)
1308 ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);
1309
1310 if (ret & NOTIFY_STOP_MASK) {
1311 pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
1312 __func__, clk->name, ret);
1313 goto out;
1314 }
1315
1316 hlist_for_each_entry(child, &clk->children, child_node) {
1317 ret = __clk_speculate_rates(child, new_rate);
1318 if (ret & NOTIFY_STOP_MASK)
1319 break;
1320 }
1321
1322 out:
1323 return ret;
1324 }
1325
1326 static void clk_calc_subtree(struct clk *clk, unsigned long new_rate,
1327 struct clk *new_parent, u8 p_index)
1328 {
1329 struct clk *child;
1330
1331 clk->new_rate = new_rate;
1332 clk->new_parent = new_parent;
1333 clk->new_parent_index = p_index;
1334 /* include clk in new parent's PRE_RATE_CHANGE notifications */
1335 clk->new_child = NULL;
1336 if (new_parent && new_parent != clk->parent)
1337 new_parent->new_child = clk;
1338
1339 hlist_for_each_entry(child, &clk->children, child_node) {
1340 child->new_rate = clk_recalc(child, new_rate);
1341 clk_calc_subtree(child, child->new_rate, NULL, 0);
1342 }
1343 }
1344
1345 /*
1346 * calculate the new rates returning the topmost clock that has to be
1347 * changed.
1348 */
1349 static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
1350 {
1351 struct clk *top = clk;
1352 struct clk *old_parent, *parent;
1353 unsigned long best_parent_rate = 0;
1354 unsigned long new_rate;
1355 int p_index = 0;
1356
1357 /* sanity */
1358 if (IS_ERR_OR_NULL(clk))
1359 return NULL;
1360
1361 /* save parent rate, if it exists */
1362 parent = old_parent = clk->parent;
1363 if (parent)
1364 best_parent_rate = parent->rate;
1365
1366 /* find the closest rate and parent clk/rate */
1367 if (clk->ops->determine_rate) {
1368 new_rate = clk->ops->determine_rate(clk->hw, rate,
1369 &best_parent_rate,
1370 &parent);
1371 } else if (clk->ops->round_rate) {
1372 new_rate = clk->ops->round_rate(clk->hw, rate,
1373 &best_parent_rate);
1374 } else if (!parent || !(clk->flags & CLK_SET_RATE_PARENT)) {
1375 /* pass-through clock without adjustable parent */
1376 clk->new_rate = clk->rate;
1377 return NULL;
1378 } else {
1379 /* pass-through clock with adjustable parent */
1380 top = clk_calc_new_rates(parent, rate);
1381 new_rate = parent->new_rate;
1382 goto out;
1383 }
1384
1385 /* some clocks must be gated to change parent */
1386 if (parent != old_parent &&
1387 (clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
1388 pr_debug("%s: %s not gated but wants to reparent\n",
1389 __func__, clk->name);
1390 return NULL;
1391 }
1392
1393 /* try finding the new parent index */
1394 if (parent) {
1395 p_index = clk_fetch_parent_index(clk, parent);
1396 if (p_index < 0) {
1397 pr_debug("%s: clk %s can not be parent of clk %s\n",
1398 __func__, parent->name, clk->name);
1399 return NULL;
1400 }
1401 }
1402
1403 if ((clk->flags & CLK_SET_RATE_PARENT) && parent &&
1404 best_parent_rate != parent->rate)
1405 top = clk_calc_new_rates(parent, best_parent_rate);
1406
1407 out:
1408 clk_calc_subtree(clk, new_rate, parent, p_index);
1409
1410 return top;
1411 }
1412
1413 /*
1414 * Notify about rate changes in a subtree. Always walk down the whole tree
1415 * so that in case of an error we can walk down the whole tree again and
1416 * abort the change.
1417 */
1418 static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
1419 {
1420 struct clk *child, *tmp_clk, *fail_clk = NULL;
1421 int ret = NOTIFY_DONE;
1422
1423 if (clk->rate == clk->new_rate)
1424 return NULL;
1425
1426 if (clk->notifier_count) {
1427 ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
1428 if (ret & NOTIFY_STOP_MASK)
1429 fail_clk = clk;
1430 }
1431
1432 hlist_for_each_entry(child, &clk->children, child_node) {
1433 /* Skip children who will be reparented to another clock */
1434 if (child->new_parent && child->new_parent != clk)
1435 continue;
1436 tmp_clk = clk_propagate_rate_change(child, event);
1437 if (tmp_clk)
1438 fail_clk = tmp_clk;
1439 }
1440
1441 /* handle the new child who might not be in clk->children yet */
1442 if (clk->new_child) {
1443 tmp_clk = clk_propagate_rate_change(clk->new_child, event);
1444 if (tmp_clk)
1445 fail_clk = tmp_clk;
1446 }
1447
1448 return fail_clk;
1449 }
1450
1451 /*
1452 * walk down a subtree and set the new rates notifying the rate
1453 * change on the way
1454 */
1455 static void clk_change_rate(struct clk *clk)
1456 {
1457 struct clk *child;
1458 struct hlist_node *tmp;
1459 unsigned long old_rate;
1460 unsigned long best_parent_rate = 0;
1461 bool skip_set_rate = false;
1462 struct clk *old_parent;
1463
1464 old_rate = clk->rate;
1465
1466 if (clk->new_parent)
1467 best_parent_rate = clk->new_parent->rate;
1468 else if (clk->parent)
1469 best_parent_rate = clk->parent->rate;
1470
1471 if (clk->new_parent && clk->new_parent != clk->parent) {
1472 old_parent = __clk_set_parent_before(clk, clk->new_parent);
1473
1474 if (clk->ops->set_rate_and_parent) {
1475 skip_set_rate = true;
1476 clk->ops->set_rate_and_parent(clk->hw, clk->new_rate,
1477 best_parent_rate,
1478 clk->new_parent_index);
1479 } else if (clk->ops->set_parent) {
1480 clk->ops->set_parent(clk->hw, clk->new_parent_index);
1481 }
1482
1483 __clk_set_parent_after(clk, clk->new_parent, old_parent);
1484 }
1485
1486 if (!skip_set_rate && clk->ops->set_rate)
1487 clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);
1488
1489 clk->rate = clk_recalc(clk, best_parent_rate);
1490
1491 if (clk->notifier_count && old_rate != clk->rate)
1492 __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
1493
1494 /*
1495 * Use safe iteration, as change_rate can actually swap parents
1496 * for certain clock types.
1497 */
1498 hlist_for_each_entry_safe(child, tmp, &clk->children, child_node) {
1499 /* Skip children who will be reparented to another clock */
1500 if (child->new_parent && child->new_parent != clk)
1501 continue;
1502 clk_change_rate(child);
1503 }
1504
1505 /* handle the new child who might not be in clk->children yet */
1506 if (clk->new_child)
1507 clk_change_rate(clk->new_child);
1508 }
1509
1510 /**
1511 * clk_set_rate - specify a new rate for clk
1512 * @clk: the clk whose rate is being changed
1513 * @rate: the new rate for clk
1514 *
1515 * In the simplest case clk_set_rate will only adjust the rate of clk.
1516 *
1517 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
1518 * propagate up to clk's parent; whether or not this happens depends on the
1519 * outcome of clk's .round_rate implementation. If *parent_rate is unchanged
1520 * after calling .round_rate then upstream parent propagation is ignored. If
1521 * *parent_rate comes back with a new rate for clk's parent then we propagate
1522 * up to clk's parent and set its rate. Upward propagation will continue
1523 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
1524 * .round_rate stops requesting changes to clk's parent_rate.
1525 *
1526 * Rate changes are accomplished via tree traversal that also recalculates the
1527 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
1528 *
1529 * Returns 0 on success, or a negative error code otherwise.
1530 */
1531 int clk_set_rate(struct clk *clk, unsigned long rate)
1532 {
1533 struct clk *top, *fail_clk;
1534 int ret = 0;
1535
1536 if (!clk)
1537 return 0;
1538
1539 /* prevent racing with updates to the clock topology */
1540 clk_prepare_lock();
1541
1542 /* bail early if nothing to do */
1543 if (rate == clk_get_rate(clk))
1544 goto out;
1545
1546 if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
1547 ret = -EBUSY;
1548 goto out;
1549 }
1550
1551 /* calculate new rates and get the topmost changed clock */
1552 top = clk_calc_new_rates(clk, rate);
1553 if (!top) {
1554 ret = -EINVAL;
1555 goto out;
1556 }
1557
1558 /* notify that we are about to change rates */
1559 fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
1560 if (fail_clk) {
1561 pr_debug("%s: failed to set %s rate\n", __func__,
1562 fail_clk->name);
1563 clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
1564 ret = -EBUSY;
1565 goto out;
1566 }
1567
1568 /* change the rates */
1569 clk_change_rate(top);
1570
1571 out:
1572 clk_prepare_unlock();
1573
1574 return ret;
1575 }
1576 EXPORT_SYMBOL_GPL(clk_set_rate);
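/*
 * Editorial example (hypothetical consumer code, not part of this
 * file): changing the rate of a clk that carries CLK_SET_RATE_GATE,
 * which must be unprepared first (see the -EBUSY check above). The
 * "foo" names are assumptions for illustration.
 *
 *	clk_disable(foo->clk);
 *	clk_unprepare(foo->clk);
 *
 *	ret = clk_set_rate(foo->clk, 96000000);
 *	if (!ret)
 *		ret = clk_prepare(foo->clk);
 *	if (!ret)
 *		ret = clk_enable(foo->clk);
 */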
1577
1578 /**
1579 * clk_get_parent - return the parent of a clk
1580 * @clk: the clk whose parent gets returned
1581 *
1582 * Simply returns clk->parent. Returns NULL if clk is NULL.
1583 */
1584 struct clk *clk_get_parent(struct clk *clk)
1585 {
1586 struct clk *parent;
1587
1588 clk_prepare_lock();
1589 parent = __clk_get_parent(clk);
1590 clk_prepare_unlock();
1591
1592 return parent;
1593 }
1594 EXPORT_SYMBOL_GPL(clk_get_parent);
1595
1596 /*
1597 * .get_parent is mandatory for clocks with multiple possible parents. It is
1598 * optional for single-parent clocks. Always call .get_parent if it is
1599 * available and WARN if it is missing for multi-parent clocks.
1600 *
1601 * For single-parent clocks without .get_parent, first check to see if the
1602 * .parents array exists, and if so use it to avoid an expensive tree
1603 * traversal. If .parents does not exist then walk the tree with __clk_lookup.
1604 */
1605 static struct clk *__clk_init_parent(struct clk *clk)
1606 {
1607 struct clk *ret = NULL;
1608 u8 index;
1609
1610 /* handle the trivial cases */
1611
1612 if (!clk->num_parents)
1613 goto out;
1614
1615 if (clk->num_parents == 1) {
1616 if (IS_ERR_OR_NULL(clk->parent))
1617 ret = clk->parent = __clk_lookup(clk->parent_names[0]);
1618 ret = clk->parent;
1619 goto out;
1620 }
1621
1622 if (!clk->ops->get_parent) {
1623 WARN(!clk->ops->get_parent,
1624 "%s: multi-parent clocks must implement .get_parent\n",
1625 __func__);
1626 goto out;
1627 }
1628
1629 /*
1630 * Do our best to cache parent clocks in clk->parents. This prevents
1631 * unnecessary and expensive calls to __clk_lookup. We don't set
1632 * clk->parent here; that is done by the calling function
1633 */
1634
1635 index = clk->ops->get_parent(clk->hw);
1636
1637 if (!clk->parents)
1638 clk->parents =
1639 kcalloc(clk->num_parents, sizeof(struct clk *),
1640 GFP_KERNEL);
1641
1642 ret = clk_get_parent_by_index(clk, index);
1643
1644 out:
1645 return ret;
1646 }
1647
1648 void __clk_reparent(struct clk *clk, struct clk *new_parent)
1649 {
1650 clk_reparent(clk, new_parent);
1651 __clk_recalc_accuracies(clk);
1652 __clk_recalc_rates(clk, POST_RATE_CHANGE);
1653 }
1654
1655 /**
1656 * clk_set_parent - switch the parent of a mux clk
1657 * @clk: the mux clk whose input we are switching
1658 * @parent: the new input to clk
1659 *
1660 * Re-parent clk to use parent as its new input source. If clk is in
1661 * prepared state, the clk will get enabled for the duration of this call. If
1662 * that's not acceptable for a specific clk (Eg: the consumer can't handle
1663 * that, the reparenting is glitchy in hardware, etc), use the
1664 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
1665 *
1666 * After successfully changing clk's parent clk_set_parent will update the
1667 * clk topology, sysfs topology and propagate rate recalculation via
1668 * __clk_recalc_rates.
1669 *
1670 * Returns 0 on success, or a negative error code otherwise.
1671 */
1672 int clk_set_parent(struct clk *clk, struct clk *parent)
1673 {
1674 int ret = 0;
1675 int p_index = 0;
1676 unsigned long p_rate = 0;
1677
1678 if (!clk)
1679 return 0;
1680
1681 /* verify ops for multi-parent clks */
1682 if ((clk->num_parents > 1) && (!clk->ops->set_parent))
1683 return -ENOSYS;
1684
1685 /* prevent racing with updates to the clock topology */
1686 clk_prepare_lock();
1687
1688 if (clk->parent == parent)
1689 goto out;
1690
1691 /* check that we are allowed to re-parent if the clock is in use */
1692 if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
1693 ret = -EBUSY;
1694 goto out;
1695 }
1696
1697 /* try finding the new parent index */
1698 if (parent) {
1699 p_index = clk_fetch_parent_index(clk, parent);
1700 p_rate = parent->rate;
1701 if (p_index < 0) {
1702 pr_debug("%s: clk %s can not be parent of clk %s\n",
1703 __func__, parent->name, clk->name);
1704 ret = p_index;
1705 goto out;
1706 }
1707 }
1708
1709 /* propagate PRE_RATE_CHANGE notifications */
1710 ret = __clk_speculate_rates(clk, p_rate);
1711
1712 /* abort if a driver objects */
1713 if (ret & NOTIFY_STOP_MASK)
1714 goto out;
1715
1716 /* do the re-parent */
1717 ret = __clk_set_parent(clk, parent, p_index);
1718
1719 /* propagate rate and accuracy recalculation accordingly */
1720 if (ret) {
1721 __clk_recalc_rates(clk, ABORT_RATE_CHANGE);
1722 } else {
1723 __clk_recalc_rates(clk, POST_RATE_CHANGE);
1724 __clk_recalc_accuracies(clk);
1725 }
1726
1727 out:
1728 clk_prepare_unlock();
1729
1730 return ret;
1731 }
1732 EXPORT_SYMBOL_GPL(clk_set_parent);
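/*
 * Editorial example (hypothetical consumer code, not part of this
 * file): switching a mux clk to a new input source. The "foo" and
 * "pll_b" names are assumptions for illustration.
 *
 *	struct clk *pll_b = clk_get(dev, "pll_b");
 *
 *	if (!IS_ERR(pll_b))
 *		ret = clk_set_parent(foo->mux_clk, pll_b);
 */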
1733
1734 /**
1735 * clk_set_phase - adjust the phase shift of a clock signal
1736 * @clk: clock signal source
1737 * @degrees: number of degrees the signal is shifted
1738 *
1739 * Shifts the phase of a clock signal by the specified
1740 * degrees. Returns 0 on success, or a negative error code otherwise.
1741 *
1742 * This function makes no assumption about the input or reference
1743 * signal against which the clock signal phase is adjusted. With
1744 * phase-locked loop clock signal generators, for example, we may shift the
1745 * phase with respect to the feedback clock signal input, but in other cases the
1746 * clock phase may be shifted with respect to some other, unspecified
1747 * signal.
1748 *
1749 * Additionally the concept of phase shift does not propagate through
1750 * the clock tree hierarchy, which sets it apart from clock rates and
1751 * clock accuracy. A parent clock phase attribute does not have an
1752 * impact on the phase attribute of a child clock.
1753 */
1754 int clk_set_phase(struct clk *clk, int degrees)
1755 {
1756 int ret = 0;
1757
1758 if (!clk)
1759 goto out;
1760
1761 /* sanity check degrees */
1762 degrees %= 360;
1763 if (degrees < 0)
1764 degrees += 360;
1765
1766 clk_prepare_lock();
1767
1768 if (!clk->ops->set_phase)
1769 goto out_unlock;
1770
1771 ret = clk->ops->set_phase(clk->hw, degrees);
1772
1773 if (!ret)
1774 clk->phase = degrees;
1775
1776 out_unlock:
1777 clk_prepare_unlock();
1778
1779 out:
1780 return ret;
1781 }
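/*
 * Editorial example (hypothetical consumer code, not part of this
 * file): shifting a sampling clock by a quarter period. The "foo"
 * names are assumptions for illustration.
 *
 *	ret = clk_set_phase(foo->sample_clk, 90);
 *	if (ret)
 *		dev_warn(dev, "could not set 90 degree phase shift\n");
 */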
1782
1783 /**
1784 * clk_get_phase - return the phase shift of a clock signal
1785 * @clk: clock signal source
1786 *
1787 * Returns the phase shift of a clock node in degrees, or 0 if clk
1788 * is NULL.
1789 */
1790 int clk_get_phase(struct clk *clk)
1791 {
1792 int ret = 0;
1793
1794 if (!clk)
1795 goto out;
1796
1797 clk_prepare_lock();
1798 ret = clk->phase;
1799 clk_prepare_unlock();
1800
1801 out:
1802 return ret;
1803 }
1804
1805 /**
1806 * __clk_init - initialize the data structures in a struct clk
1807 * @dev: device initializing this clk, placeholder for now
1808 * @clk: clk being initialized
1809 *
1810 * Initializes the lists in struct clk, queries the hardware for the
1811 * parent and rate and sets them both.
1812 */
1813 int __clk_init(struct device *dev, struct clk *clk)
1814 {
1815 int i, ret = 0;
1816 struct clk *orphan;
1817 struct hlist_node *tmp2;
1818
1819 if (!clk)
1820 return -EINVAL;
1821
1822 clk_prepare_lock();
1823
1824 /* check to see if a clock with this name is already registered */
1825 if (__clk_lookup(clk->name)) {
1826 pr_debug("%s: clk %s already initialized\n",
1827 __func__, clk->name);
1828 ret = -EEXIST;
1829 goto out;
1830 }
1831
1832 /* check that clk_ops are sane. See Documentation/clk.txt */
1833 if (clk->ops->set_rate &&
1834 !((clk->ops->round_rate || clk->ops->determine_rate) &&
1835 clk->ops->recalc_rate)) {
1836 pr_warning("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
1837 __func__, clk->name);
1838 ret = -EINVAL;
1839 goto out;
1840 }
1841
1842 if (clk->ops->set_parent && !clk->ops->get_parent) {
1843 pr_warning("%s: %s must implement .get_parent & .set_parent\n",
1844 __func__, clk->name);
1845 ret = -EINVAL;
1846 goto out;
1847 }
1848
1849 if (clk->ops->set_rate_and_parent &&
1850 !(clk->ops->set_parent && clk->ops->set_rate)) {
1851 pr_warn("%s: %s must implement .set_parent & .set_rate\n",
1852 __func__, clk->name);
1853 ret = -EINVAL;
1854 goto out;
1855 }
1856
1857 /* throw a WARN if any entries in parent_names are NULL */
1858 for (i = 0; i < clk->num_parents; i++)
1859 WARN(!clk->parent_names[i],
1860 "%s: invalid NULL in %s's .parent_names\n",
1861 __func__, clk->name);
1862
1863 /*
1864 * Allocate an array of struct clk *'s to avoid unnecessary string
1865 * look-ups of clk's possible parents. This can fail for clocks passed
1866 * in to clk_init during early boot; thus any access to clk->parents[]
1867 * must always check for a NULL pointer and try to populate it if
1868 * necessary.
1869 *
1870 * If clk->parents is not NULL we skip this entire block. This allows
1871 * for clock drivers to statically initialize clk->parents.
1872 */
1873 if (clk->num_parents > 1 && !clk->parents) {
1874 clk->parents = kcalloc(clk->num_parents, sizeof(struct clk *),
1875 GFP_KERNEL);
1876 /*
1877 * __clk_lookup returns NULL for parents that have not been
1878 * clk_init'd; thus any access to clk->parents[] must check
1879 * for a NULL pointer. We can always perform lazy lookups for
1880 * missing parents later on.
1881 */
1882 if (clk->parents)
1883 for (i = 0; i < clk->num_parents; i++)
1884 clk->parents[i] =
1885 __clk_lookup(clk->parent_names[i]);
1886 }
1887
1888 clk->parent = __clk_init_parent(clk);
1889
1890 /*
1891 * Populate clk->parent if parent has already been __clk_init'd. If
1892 * parent has not yet been __clk_init'd then place clk in the orphan
1893 * list. If clk has set the CLK_IS_ROOT flag then place it in the root
1894 * clk list.
1895 *
1896 * Every time a new clk is clk_init'd then we walk the list of orphan
1897 * clocks and re-parent any that are children of the clock currently
1898 * being clk_init'd.
1899 */
1900 if (clk->parent)
1901 hlist_add_head(&clk->child_node,
1902 &clk->parent->children);
1903 else if (clk->flags & CLK_IS_ROOT)
1904 hlist_add_head(&clk->child_node, &clk_root_list);
1905 else
1906 hlist_add_head(&clk->child_node, &clk_orphan_list);
1907
1908 /*
1909 * Set clk's accuracy. The preferred method is to use
1910 * .recalc_accuracy. For simple clocks and lazy developers the default
1911 * fallback is to use the parent's accuracy. If a clock doesn't have a
1912 * parent (or is orphaned) then accuracy is set to zero (perfect
1913 * clock).
1914 */
1915 if (clk->ops->recalc_accuracy)
1916 clk->accuracy = clk->ops->recalc_accuracy(clk->hw,
1917 __clk_get_accuracy(clk->parent));
1918 else if (clk->parent)
1919 clk->accuracy = clk->parent->accuracy;
1920 else
1921 clk->accuracy = 0;
1922
1923 /*
1924 * Set clk's phase.
1925 * Since a phase is by definition relative to its parent, just
1926 * query the current clock phase, or just assume it's in phase.
1927 */
1928 if (clk->ops->get_phase)
1929 clk->phase = clk->ops->get_phase(clk->hw);
1930 else
1931 clk->phase = 0;
1932
1933 /*
1934 * Set clk's rate. The preferred method is to use .recalc_rate. For
1935 * simple clocks and lazy developers the default fallback is to use the
1936 * parent's rate. If a clock doesn't have a parent (or is orphaned)
1937 * then rate is set to zero.
1938 */
1939 if (clk->ops->recalc_rate)
1940 clk->rate = clk->ops->recalc_rate(clk->hw,
1941 __clk_get_rate(clk->parent));
1942 else if (clk->parent)
1943 clk->rate = clk->parent->rate;
1944 else
1945 clk->rate = 0;
1946
1947 clk_debug_register(clk);
1948 /*
1949 * walk the list of orphan clocks and reparent any that are children of
1950 * this clock
1951 */
1952 hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
1953 if (orphan->num_parents && orphan->ops->get_parent) {
1954 i = orphan->ops->get_parent(orphan->hw);
1955 if (!strcmp(clk->name, orphan->parent_names[i]))
1956 __clk_reparent(orphan, clk);
1957 continue;
1958 }
1959
1960 for (i = 0; i < orphan->num_parents; i++)
1961 if (!strcmp(clk->name, orphan->parent_names[i])) {
1962 __clk_reparent(orphan, clk);
1963 break;
1964 }
1965 }
1966
1967 /*
1968 * optional platform-specific magic
1969 *
1970 * The .init callback is not used by any of the basic clock types, but
1971 * exists for weird hardware that must perform initialization magic.
1972 * Please consider other ways of solving initialization problems before
1973 * using this callback, as its use is discouraged.
1974 */
1975 if (clk->ops->init)
1976 clk->ops->init(clk->hw);
1977
1978 kref_init(&clk->ref);
1979 out:
1980 clk_prepare_unlock();
1981
1982 return ret;
1983 }
1984
1985 /**
1986 * __clk_register - register a clock and return a cookie.
1987 *
1988 * Same as clk_register, except that the .clk field inside hw shall point to a
1989 * preallocated (generally statically allocated) struct clk. None of the fields
1990 * of the struct clk need to be initialized.
1991 *
1992 * The data pointed to by .init and .clk field shall NOT be marked as init
1993 * data.
1994 *
1995 * __clk_register is only exposed via clk-private.h and is intended for use with
1996 * very large numbers of clocks that need to be statically initialized. It is
1997 * a layering violation to include clk-private.h from any code which implements
1998 * a clock's .ops; as such any statically initialized clock data MUST be in a
1999 * separate C file from the logic that implements its operations. Returns
2000 * the new struct clk on success, or an ERR_PTR-encoded error on failure.
2001 */
2002 struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
2003 {
2004 int ret;
2005 struct clk *clk;
2006
2007 clk = hw->clk;
2008 clk->name = hw->init->name;
2009 clk->ops = hw->init->ops;
2010 clk->hw = hw;
2011 clk->flags = hw->init->flags;
2012 clk->parent_names = hw->init->parent_names;
2013 clk->num_parents = hw->init->num_parents;
2014 if (dev && dev->driver)
2015 clk->owner = dev->driver->owner;
2016 else
2017 clk->owner = NULL;
2018
2019 ret = __clk_init(dev, clk);
2020 if (ret)
2021 return ERR_PTR(ret);
2022
2023 return clk;
2024 }
2025 EXPORT_SYMBOL_GPL(__clk_register);
2026
2027 /**
2028 * clk_register - allocate a new clock, register it and return an opaque cookie
2029 * @dev: device that is registering this clock
2030 * @hw: link to hardware-specific clock data
2031 *
2032 * clk_register is the primary interface for populating the clock tree with new
2033 * clock nodes. It returns a pointer to the newly allocated struct clk which
2034 * cannot be dereferenced by driver code but may be used in conjunction with the
2035 * rest of the clock API. In the event of an error clk_register returns an
2036 * error pointer; drivers must test for it with IS_ERR() after calling clk_register.
2037 */
2038 struct clk *clk_register(struct device *dev, struct clk_hw *hw)
2039 {
2040 int i, ret;
2041 struct clk *clk;
2042
2043 clk = kzalloc(sizeof(*clk), GFP_KERNEL);
2044 if (!clk) {
2045 pr_err("%s: could not allocate clk\n", __func__);
2046 ret = -ENOMEM;
2047 goto fail_out;
2048 }
2049
2050 clk->name = kstrdup(hw->init->name, GFP_KERNEL);
2051 if (!clk->name) {
2052 pr_err("%s: could not allocate clk->name\n", __func__);
2053 ret = -ENOMEM;
2054 goto fail_name;
2055 }
2056 clk->ops = hw->init->ops;
2057 if (dev && dev->driver)
2058 clk->owner = dev->driver->owner;
2059 clk->hw = hw;
2060 clk->flags = hw->init->flags;
2061 clk->num_parents = hw->init->num_parents;
2062 hw->clk = clk;
2063
2064 /* allocate local copy in case parent_names is __initdata */
2065 clk->parent_names = kcalloc(clk->num_parents, sizeof(char *),
2066 GFP_KERNEL);
2067
2068 if (!clk->parent_names) {
2069 pr_err("%s: could not allocate clk->parent_names\n", __func__);
2070 ret = -ENOMEM;
2071 goto fail_parent_names;
2072 }
2073 
2075 /* copy each string name in case parent_names is __initdata */
2076 for (i = 0; i < clk->num_parents; i++) {
2077 clk->parent_names[i] = kstrdup(hw->init->parent_names[i],
2078 GFP_KERNEL);
2079 if (!clk->parent_names[i]) {
2080 pr_err("%s: could not copy parent_names\n", __func__);
2081 ret = -ENOMEM;
2082 goto fail_parent_names_copy;
2083 }
2084 }
2085
2086 ret = __clk_init(dev, clk);
2087 if (!ret)
2088 return clk;
2089
2090 fail_parent_names_copy:
2091 while (--i >= 0)
2092 kfree(clk->parent_names[i]);
2093 kfree(clk->parent_names);
2094 fail_parent_names:
2095 kfree(clk->name);
2096 fail_name:
2097 kfree(clk);
2098 fail_out:
2099 return ERR_PTR(ret);
2100 }
2101 EXPORT_SYMBOL_GPL(clk_register);
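/*
 * Usage sketch (not part of this file; the "foo" names and foo_clk_ops
 * are illustrative only): a provider embeds a struct clk_hw in its own
 * state, points hw.init at a struct clk_init_data and hands the result
 * to clk_register():
 *
 *	static const char *foo_parents[] = { "pll1" };
 *
 *	static struct clk_init_data foo_init = {
 *		.name		= "foo",
 *		.ops		= &foo_clk_ops,
 *		.parent_names	= foo_parents,
 *		.num_parents	= ARRAY_SIZE(foo_parents),
 *	};
 *
 *	static struct clk_hw foo_hw = { .init = &foo_init };
 *
 *	clk = clk_register(dev, &foo_hw);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 */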
2102
2103 /*
2104 * Free memory allocated for a clock.
2105 * Caller must hold prepare_lock.
2106 */
2107 static void __clk_release(struct kref *ref)
2108 {
2109 struct clk *clk = container_of(ref, struct clk, ref);
2110 int i = clk->num_parents;
2111
2112 kfree(clk->parents);
2113 while (--i >= 0)
2114 kfree(clk->parent_names[i]);
2115
2116 kfree(clk->parent_names);
2117 kfree(clk->name);
2118 kfree(clk);
2119 }
2120
2121 /*
2122 * Empty clk_ops for unregistered clocks. These are used temporarily
2123 * after clk_unregister() was called on a clock and until the last clock
2124 * consumer calls clk_put() and the struct clk object is freed.
2125 */
2126 static int clk_nodrv_prepare_enable(struct clk_hw *hw)
2127 {
2128 return -ENXIO;
2129 }
2130
2131 static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
2132 {
2133 WARN_ON_ONCE(1);
2134 }
2135
2136 static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
2137 unsigned long parent_rate)
2138 {
2139 return -ENXIO;
2140 }
2141
2142 static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
2143 {
2144 return -ENXIO;
2145 }
2146
2147 static const struct clk_ops clk_nodrv_ops = {
2148 .enable = clk_nodrv_prepare_enable,
2149 .disable = clk_nodrv_disable_unprepare,
2150 .prepare = clk_nodrv_prepare_enable,
2151 .unprepare = clk_nodrv_disable_unprepare,
2152 .set_rate = clk_nodrv_set_rate,
2153 .set_parent = clk_nodrv_set_parent,
2154 };
2155
2156 /**
2157 * clk_unregister - unregister a currently registered clock
2158 * @clk: clock to unregister
2159 */
2160 void clk_unregister(struct clk *clk)
2161 {
2162 unsigned long flags;
2163
2164 if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
2165 return;
2166
2167 clk_debug_unregister(clk);
2168
2169 clk_prepare_lock();
2170
2171 	if (clk->ops == &clk_nodrv_ops) {
2172 		pr_err("%s: unregistered clock: %s\n", __func__, clk->name);
2173 		goto unlock;
2174 	}
2175 /*
2176 * Assign empty clock ops for consumers that might still hold
2177 * a reference to this clock.
2178 */
2179 flags = clk_enable_lock();
2180 clk->ops = &clk_nodrv_ops;
2181 clk_enable_unlock(flags);
2182
2183 if (!hlist_empty(&clk->children)) {
2184 struct clk *child;
2185 struct hlist_node *t;
2186
2187 /* Reparent all children to the orphan list. */
2188 hlist_for_each_entry_safe(child, t, &clk->children, child_node)
2189 clk_set_parent(child, NULL);
2190 }
2191
2192 hlist_del_init(&clk->child_node);
2193
2194 if (clk->prepare_count)
2195 pr_warn("%s: unregistering prepared clock: %s\n",
2196 __func__, clk->name);
2197 kref_put(&clk->ref, __clk_release);
2198 
unlock:
2199 	clk_prepare_unlock();
2200 }
2201 EXPORT_SYMBOL_GPL(clk_unregister);
2202
2203 static void devm_clk_release(struct device *dev, void *res)
2204 {
2205 clk_unregister(*(struct clk **)res);
2206 }
2207
2208 /**
2209 * devm_clk_register - resource managed clk_register()
2210 * @dev: device that is registering this clock
2211 * @hw: link to hardware-specific clock data
2212 *
2213 * Managed clk_register(). Clocks returned from this function are
2214 * automatically clk_unregister()ed on driver detach. See clk_register() for
2215 * more information.
2216 */
2217 struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
2218 {
2219 struct clk *clk;
2220 struct clk **clkp;
2221
2222 clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
2223 if (!clkp)
2224 return ERR_PTR(-ENOMEM);
2225
2226 clk = clk_register(dev, hw);
2227 if (!IS_ERR(clk)) {
2228 *clkp = clk;
2229 devres_add(dev, clkp);
2230 } else {
2231 devres_free(clkp);
2232 }
2233
2234 return clk;
2235 }
2236 EXPORT_SYMBOL_GPL(devm_clk_register);
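/*
 * Usage sketch (assuming the illustrative foo_hw from the clk_register()
 * example above): same calling convention as clk_register(), but the
 * registration is undone automatically on driver detach, so no explicit
 * clk_unregister() is needed in the error or remove paths:
 *
 *	clk = devm_clk_register(&pdev->dev, &foo_hw);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 */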
2237
2238 static int devm_clk_match(struct device *dev, void *res, void *data)
2239 {
2240 struct clk *c = res;
2241 if (WARN_ON(!c))
2242 return 0;
2243 return c == data;
2244 }
2245
2246 /**
2247 * devm_clk_unregister - resource managed clk_unregister()
 * @dev: device that registered the clock
2248 * @clk: clock to unregister
2249 *
2250 * Deallocate a clock allocated with devm_clk_register(). Normally
2251 * this function will not need to be called and the resource management
2252 * code will ensure that the resource is freed.
2253 */
2254 void devm_clk_unregister(struct device *dev, struct clk *clk)
2255 {
2256 WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
2257 }
2258 EXPORT_SYMBOL_GPL(devm_clk_unregister);
2259
2260 /*
2261 * clkdev helpers
2262 */
2263 int __clk_get(struct clk *clk)
2264 {
2265 if (clk) {
2266 if (!try_module_get(clk->owner))
2267 return 0;
2268
2269 kref_get(&clk->ref);
2270 }
2271 return 1;
2272 }
2273
2274 void __clk_put(struct clk *clk)
2275 {
2276 if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
2277 return;
2278
2279 clk_prepare_lock();
2280 kref_put(&clk->ref, __clk_release);
2281 clk_prepare_unlock();
2282
2283 module_put(clk->owner);
2284 }
2285
2286 /*** clk rate change notifiers ***/
2287
2288 /**
2289 * clk_notifier_register - add a clk rate change notifier
2290 * @clk: struct clk * to watch
2291 * @nb: struct notifier_block * with callback info
2292 *
2293 * Request notification when clk's rate changes. This uses an SRCU
2294 * notifier because we want it to block and notifier unregistrations are
2295 * uncommon. The callbacks associated with the notifier must not
2296 * re-enter the clk framework by calling any top-level clk APIs; doing so
2297 * would deadlock trying to re-acquire the prepare_lock mutex.
2298 *
2299 * In all notification cases (pre, post and abort rate change) the
2300 * original clock rate is passed to the callback via struct
2301 * clk_notifier_data.old_rate and the new frequency is passed via struct
2302 * clk_notifier_data.new_rate.
2303 *
2304 * clk_notifier_register() must be called from non-atomic context.
2305 * Returns -EINVAL if called with null arguments, -ENOMEM upon
2306 * allocation failure; otherwise, passes along the return value of
2307 * srcu_notifier_chain_register().
2308 */
2309 int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
2310 {
2311 struct clk_notifier *cn;
2312 int ret = -ENOMEM;
2313
2314 if (!clk || !nb)
2315 return -EINVAL;
2316
2317 clk_prepare_lock();
2318
2319 /* search the list of notifiers for this clk */
2320 list_for_each_entry(cn, &clk_notifier_list, node)
2321 if (cn->clk == clk)
2322 break;
2323
2324 /* if clk wasn't in the notifier list, allocate new clk_notifier */
2325 if (cn->clk != clk) {
2326 cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
2327 if (!cn)
2328 goto out;
2329
2330 cn->clk = clk;
2331 srcu_init_notifier_head(&cn->notifier_head);
2332
2333 list_add(&cn->node, &clk_notifier_list);
2334 }
2335
2336 ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
2337
2338 clk->notifier_count++;
2339
2340 out:
2341 clk_prepare_unlock();
2342
2343 return ret;
2344 }
2345 EXPORT_SYMBOL_GPL(clk_notifier_register);
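/*
 * Usage sketch, with an illustrative callback and limit (FOO_MAX_RATE is
 * not a real symbol): the callback receives PRE_RATE_CHANGE,
 * POST_RATE_CHANGE or ABORT_RATE_CHANGE together with a struct
 * clk_notifier_data carrying old_rate and new_rate, and may veto a
 * pending change by returning NOTIFY_BAD from the pre-change call:
 *
 *	static int foo_rate_notify(struct notifier_block *nb,
 *				   unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		if (event == PRE_RATE_CHANGE && ndata->new_rate > FOO_MAX_RATE)
 *			return NOTIFY_BAD;
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call	= foo_rate_notify,
 *	};
 *
 *	ret = clk_notifier_register(clk, &foo_nb);
 */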
2346
2347 /**
2348 * clk_notifier_unregister - remove a clk rate change notifier
2349 * @clk: struct clk *
2350 * @nb: struct notifier_block * with callback info
2351 *
2352 * Requests no further notification for changes to 'clk' and frees the
2353 * memory allocated in clk_notifier_register().
2354 *
2355 * Returns -EINVAL if called with null arguments; otherwise, passes
2356 * along the return value of srcu_notifier_chain_unregister().
2357 */
2358 int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
2359 {
2360 struct clk_notifier *cn = NULL;
2361 int ret = -EINVAL;
2362
2363 if (!clk || !nb)
2364 return -EINVAL;
2365
2366 clk_prepare_lock();
2367
2368 list_for_each_entry(cn, &clk_notifier_list, node)
2369 if (cn->clk == clk)
2370 break;
2371
2372 if (cn->clk == clk) {
2373 ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
2374
2375 clk->notifier_count--;
2376
2377 /* XXX the notifier code should handle this better */
2378 if (!cn->notifier_head.head) {
2379 srcu_cleanup_notifier_head(&cn->notifier_head);
2380 list_del(&cn->node);
2381 kfree(cn);
2382 }
2383
2384 } else {
2385 ret = -ENOENT;
2386 }
2387
2388 clk_prepare_unlock();
2389
2390 return ret;
2391 }
2392 EXPORT_SYMBOL_GPL(clk_notifier_unregister);
2393
2394 #ifdef CONFIG_OF
2395 /**
2396 * struct of_clk_provider - Clock provider registration structure
2397 * @link: Entry in global list of clock providers
2398 * @node: Pointer to device tree node of clock provider
2399 * @get: Get clock callback. Returns NULL or a struct clk for the
2400 * given clock specifier
2401 * @data: context pointer to be passed into @get callback
2402 */
2403 struct of_clk_provider {
2404 struct list_head link;
2405
2406 struct device_node *node;
2407 struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
2408 void *data;
2409 };
2410
2411 static const struct of_device_id __clk_of_table_sentinel
2412 __used __section(__clk_of_table_end);
2413
2414 static LIST_HEAD(of_clk_providers);
2415 static DEFINE_MUTEX(of_clk_mutex);
2416
2417 /* of_clk_provider list locking helpers */
2418 void of_clk_lock(void)
2419 {
2420 mutex_lock(&of_clk_mutex);
2421 }
2422
2423 void of_clk_unlock(void)
2424 {
2425 mutex_unlock(&of_clk_mutex);
2426 }
2427
2428 struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
2429 void *data)
2430 {
2431 return data;
2432 }
2433 EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
2434
2435 struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
2436 {
2437 struct clk_onecell_data *clk_data = data;
2438 unsigned int idx = clkspec->args[0];
2439
2440 if (idx >= clk_data->clk_num) {
2441 		pr_err("%s: invalid clock index %u\n", __func__, idx);
2442 return ERR_PTR(-EINVAL);
2443 }
2444
2445 return clk_data->clks[idx];
2446 }
2447 EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
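/*
 * Usage sketch for a multi-output provider (names illustrative): fill a
 * struct clk_onecell_data with the clocks in specifier order and pass
 * of_clk_src_onecell_get as the @get callback of of_clk_add_provider()
 * below:
 *
 *	static struct clk *foo_clks[4];
 *	static struct clk_onecell_data foo_clk_data = {
 *		.clks		= foo_clks,
 *		.clk_num	= ARRAY_SIZE(foo_clks),
 *	};
 *
 *	of_clk_add_provider(np, of_clk_src_onecell_get, &foo_clk_data);
 */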
2448
2449 /**
2450 * of_clk_add_provider() - Register a clock provider for a node
2451 * @np: Device node pointer associated with clock provider
2452 * @clk_src_get: callback for decoding clock
2453 * @data: context pointer for @clk_src_get callback.
2454 */
2455 int of_clk_add_provider(struct device_node *np,
2456 struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
2457 void *data),
2458 void *data)
2459 {
2460 struct of_clk_provider *cp;
2461 int ret;
2462
2463 cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
2464 if (!cp)
2465 return -ENOMEM;
2466
2467 cp->node = of_node_get(np);
2468 cp->data = data;
2469 cp->get = clk_src_get;
2470
2471 mutex_lock(&of_clk_mutex);
2472 list_add(&cp->link, &of_clk_providers);
2473 mutex_unlock(&of_clk_mutex);
2474 	pr_debug("Added clock provider from %s\n", np->full_name);
2475
2476 ret = of_clk_set_defaults(np, true);
2477 if (ret < 0)
2478 of_clk_del_provider(np);
2479
2480 return ret;
2481 }
2482 EXPORT_SYMBOL_GPL(of_clk_add_provider);
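/*
 * Usage sketch for a single-output provider: of_clk_src_simple_get()
 * simply returns @data, so the clock itself can be used as the context
 * pointer:
 *
 *	of_clk_add_provider(np, of_clk_src_simple_get, clk);
 */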
2483
2484 /**
2485 * of_clk_del_provider() - Remove a previously registered clock provider
2486 * @np: Device node pointer associated with clock provider
2487 */
2488 void of_clk_del_provider(struct device_node *np)
2489 {
2490 struct of_clk_provider *cp;
2491
2492 mutex_lock(&of_clk_mutex);
2493 list_for_each_entry(cp, &of_clk_providers, link) {
2494 if (cp->node == np) {
2495 list_del(&cp->link);
2496 of_node_put(cp->node);
2497 kfree(cp);
2498 break;
2499 }
2500 }
2501 mutex_unlock(&of_clk_mutex);
2502 }
2503 EXPORT_SYMBOL_GPL(of_clk_del_provider);
2504
2505 struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec)
2506 {
2507 struct of_clk_provider *provider;
2508 struct clk *clk = ERR_PTR(-EPROBE_DEFER);
2509
2510 	/* Check if we have such a provider in our list */
2511 list_for_each_entry(provider, &of_clk_providers, link) {
2512 if (provider->node == clkspec->np)
2513 clk = provider->get(clkspec, provider->data);
2514 if (!IS_ERR(clk))
2515 break;
2516 }
2517
2518 return clk;
2519 }
2520
2521 struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
2522 {
2523 struct clk *clk;
2524
2525 mutex_lock(&of_clk_mutex);
2526 clk = __of_clk_get_from_provider(clkspec);
2527 mutex_unlock(&of_clk_mutex);
2528
2529 return clk;
2530 }
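/*
 * Usage sketch: consumers normally reach this through clk_get() or
 * of_clk_get(), which build the specifier from the "clocks" property.
 * Resolving by hand would look like:
 *
 *	struct of_phandle_args clkspec;
 *
 *	if (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", 0,
 *					&clkspec)) {
 *		clk = of_clk_get_from_provider(&clkspec);
 *		of_node_put(clkspec.np);
 *	}
 */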
2531
2532 int of_clk_get_parent_count(struct device_node *np)
2533 {
2534 return of_count_phandle_with_args(np, "clocks", "#clock-cells");
2535 }
2536 EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
2537
2538 const char *of_clk_get_parent_name(struct device_node *np, int index)
2539 {
2540 struct of_phandle_args clkspec;
2541 struct property *prop;
2542 const char *clk_name;
2543 const __be32 *vp;
2544 u32 pv;
2545 int rc;
2546 int count;
2547
2548 if (index < 0)
2549 return NULL;
2550
2551 rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
2552 &clkspec);
2553 if (rc)
2554 return NULL;
2555
2556 index = clkspec.args_count ? clkspec.args[0] : 0;
2557 count = 0;
2558
2559 	/* if there is a clock-indices property, use it to translate the index
2560 	 * specified into an array offset for the clock-output-names property.
2561 	 */
2562 of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
2563 if (index == pv) {
2564 index = count;
2565 break;
2566 }
2567 count++;
2568 }
2569
2570 if (of_property_read_string_index(clkspec.np, "clock-output-names",
2571 index,
2572 &clk_name) < 0)
2573 clk_name = clkspec.np->name;
2574
2575 of_node_put(clkspec.np);
2576 return clk_name;
2577 }
2578 EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
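/*
 * Usage sketch: a driver registering a clock whose parent is described
 * in the device tree can derive the parent name directly:
 *
 *	const char *parent = of_clk_get_parent_name(np, 0);
 */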
2579
2580 struct clock_provider {
2581 of_clk_init_cb_t clk_init_cb;
2582 struct device_node *np;
2583 struct list_head node;
2584 };
2585
2586 static LIST_HEAD(clk_provider_list);
2587
2588 /*
2589  * This function checks whether every parent clock of @np is ready, i.e.
2590  * whether the provider of each parent has already been initialized.
2591  * Returns 1 if all parents are ready (or there are none), 0 otherwise.
2592 */
2593 static int parent_ready(struct device_node *np)
2594 {
2595 int i = 0;
2596
2597 while (true) {
2598 struct clk *clk = of_clk_get(np, i);
2599
2600 		/* this parent is ready, check the next one */
2601 if (!IS_ERR(clk)) {
2602 clk_put(clk);
2603 i++;
2604 continue;
2605 }
2606
2607 /* at least one parent is not ready, we exit now */
2608 if (PTR_ERR(clk) == -EPROBE_DEFER)
2609 return 0;
2610
2611 		/*
2612 		 * Here we assume the device tree is written correctly, so any
2613 		 * other error means there are no more parents. Since we did
2614 		 * not exit earlier, all previously checked parents are ready.
2615 		 * A node with no clock parents at all has nothing to wait for
2616 		 * and can likewise be considered ready.
2617 		 */
2619 return 1;
2620 }
2621 }
2622
2623 /**
2624 * of_clk_init() - Scan and init clock providers from the DT
2625 * @matches: array of compatible values and init functions for providers.
2626 *
2627 * This function scans the device tree for matching clock providers
2628 * and calls their initialization functions, following the dependencies
2629 * between providers where possible.
2630 */
2631 void __init of_clk_init(const struct of_device_id *matches)
2632 {
2633 const struct of_device_id *match;
2634 struct device_node *np;
2635 struct clock_provider *clk_provider, *next;
2636 bool is_init_done;
2637 bool force = false;
2638
2639 if (!matches)
2640 matches = &__clk_of_table;
2641
2642 /* First prepare the list of the clocks providers */
2643 for_each_matching_node_and_match(np, matches, &match) {
2644 struct clock_provider *parent =
2645 kzalloc(sizeof(struct clock_provider), GFP_KERNEL);
2646 
		/* free the queued providers and bail out on allocation failure */
		if (!parent) {
			list_for_each_entry_safe(clk_provider, next,
						 &clk_provider_list, node) {
				list_del(&clk_provider->node);
				kfree(clk_provider);
			}
			of_node_put(np);
			return;
		}

2647 parent->clk_init_cb = match->data;
2648 parent->np = np;
2649 list_add_tail(&parent->node, &clk_provider_list);
2650 }
2651
2652 while (!list_empty(&clk_provider_list)) {
2653 is_init_done = false;
2654 list_for_each_entry_safe(clk_provider, next,
2655 &clk_provider_list, node) {
2656 if (force || parent_ready(clk_provider->np)) {
2657
2658 clk_provider->clk_init_cb(clk_provider->np);
2659 of_clk_set_defaults(clk_provider->np, true);
2660
2661 list_del(&clk_provider->node);
2662 kfree(clk_provider);
2663 is_init_done = true;
2664 }
2665 }
2666
2667 		/*
2668 		 * If we didn't manage to initialize any of the remaining
2669 		 * providers during the last pass, initialize all of them
2670 		 * unconditionally on the next pass, in case their clock
2671 		 * parents are not mandatory.
2672 		 */
2673 if (!is_init_done)
2674 force = true;
2675 }
2676 }
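/*
 * Providers that must be available this early hook into the table
 * consumed here with CLK_OF_DECLARE() (see clk-provider.h) instead of
 * registering a platform driver; a minimal sketch, with illustrative
 * names and a fixed-rate clock standing in for real hardware setup:
 *
 *	static void __init foo_clk_setup(struct device_node *np)
 *	{
 *		struct clk *clk;
 *
 *		clk = clk_register_fixed_rate(NULL, "foo", NULL, CLK_IS_ROOT,
 *					      24000000);
 *		of_clk_add_provider(np, of_clk_src_simple_get, clk);
 *	}
 *	CLK_OF_DECLARE(foo_clk, "vendor,foo-clock", foo_clk_setup);
 */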
2677 #endif