drivers/clk/clk.c
b2476490
MT
1/*
2 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
3 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * Standard functionality for the common clock API. See Documentation/clk.txt
10 */
11
b09d6d99 12#include <linux/clk-provider.h>
86be408b 13#include <linux/clk/clk-conf.h>
b2476490
MT
14#include <linux/module.h>
15#include <linux/mutex.h>
16#include <linux/spinlock.h>
17#include <linux/err.h>
18#include <linux/list.h>
19#include <linux/slab.h>
766e6a4e 20#include <linux/of.h>
46c8773a 21#include <linux/device.h>
f2f6c255 22#include <linux/init.h>
533ddeb1 23#include <linux/sched.h>
562ef0b0 24#include <linux/clkdev.h>
b2476490 25
d6782c26
SN
26#include "clk.h"
27
b2476490
MT
28static DEFINE_SPINLOCK(enable_lock);
29static DEFINE_MUTEX(prepare_lock);
30
533ddeb1
MT
31static struct task_struct *prepare_owner;
32static struct task_struct *enable_owner;
33
34static int prepare_refcnt;
35static int enable_refcnt;
36
b2476490
MT
37static HLIST_HEAD(clk_root_list);
38static HLIST_HEAD(clk_orphan_list);
39static LIST_HEAD(clk_notifier_list);
40
b09d6d99
MT
41/*** private data structures ***/
42
43struct clk_core {
44 const char *name;
45 const struct clk_ops *ops;
46 struct clk_hw *hw;
47 struct module *owner;
48 struct clk_core *parent;
49 const char **parent_names;
50 struct clk_core **parents;
51 u8 num_parents;
52 u8 new_parent_index;
53 unsigned long rate;
1c8e6004 54 unsigned long req_rate;
b09d6d99
MT
55 unsigned long new_rate;
56 struct clk_core *new_parent;
57 struct clk_core *new_child;
58 unsigned long flags;
59 unsigned int enable_count;
60 unsigned int prepare_count;
61 unsigned long accuracy;
62 int phase;
63 struct hlist_head children;
64 struct hlist_node child_node;
1c8e6004 65 struct hlist_head clks;
b09d6d99
MT
66 unsigned int notifier_count;
67#ifdef CONFIG_DEBUG_FS
68 struct dentry *dentry;
8c9a8a8f 69 struct hlist_node debug_node;
b09d6d99
MT
70#endif
71 struct kref ref;
72};
73
dfc202ea
SB
74#define CREATE_TRACE_POINTS
75#include <trace/events/clk.h>
76
b09d6d99
MT
77struct clk {
78 struct clk_core *core;
79 const char *dev_id;
80 const char *con_id;
1c8e6004
TV
81 unsigned long min_rate;
82 unsigned long max_rate;
50595f8b 83 struct hlist_node clks_node;
b09d6d99
MT
84};
85
eab89f69
MT
86/*** locking ***/
87static void clk_prepare_lock(void)
88{
533ddeb1
MT
89 if (!mutex_trylock(&prepare_lock)) {
90 if (prepare_owner == current) {
91 prepare_refcnt++;
92 return;
93 }
94 mutex_lock(&prepare_lock);
95 }
96 WARN_ON_ONCE(prepare_owner != NULL);
97 WARN_ON_ONCE(prepare_refcnt != 0);
98 prepare_owner = current;
99 prepare_refcnt = 1;
eab89f69
MT
100}
101
102static void clk_prepare_unlock(void)
103{
533ddeb1
MT
104 WARN_ON_ONCE(prepare_owner != current);
105 WARN_ON_ONCE(prepare_refcnt == 0);
106
107 if (--prepare_refcnt)
108 return;
109 prepare_owner = NULL;
eab89f69
MT
110 mutex_unlock(&prepare_lock);
111}
112
113static unsigned long clk_enable_lock(void)
114{
115 unsigned long flags;
533ddeb1
MT
116
117 if (!spin_trylock_irqsave(&enable_lock, flags)) {
118 if (enable_owner == current) {
119 enable_refcnt++;
120 return flags;
121 }
122 spin_lock_irqsave(&enable_lock, flags);
123 }
124 WARN_ON_ONCE(enable_owner != NULL);
125 WARN_ON_ONCE(enable_refcnt != 0);
126 enable_owner = current;
127 enable_refcnt = 1;
eab89f69
MT
128 return flags;
129}
130
131static void clk_enable_unlock(unsigned long flags)
132{
533ddeb1
MT
133 WARN_ON_ONCE(enable_owner != current);
134 WARN_ON_ONCE(enable_refcnt == 0);
135
136 if (--enable_refcnt)
137 return;
138 enable_owner = NULL;
eab89f69
MT
139 spin_unlock_irqrestore(&enable_lock, flags);
140}
141
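/*
 * Illustrative sketch (not from the original source): both locks above
 * are reentrant for a single task, which lets framework paths nest.
 * What the trylock/owner bookkeeping permits, roughly:
 *
 *	clk_prepare_lock();		refcnt 0 -> 1, owner = current
 *		clk_prepare_lock();	same owner: refcnt 1 -> 2
 *		clk_prepare_unlock();	refcnt 2 -> 1, mutex still held
 *	clk_prepare_unlock();		refcnt 1 -> 0, mutex released
 */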
4dff95dc
SB
142static bool clk_core_is_prepared(struct clk_core *core)
143{
144 /*
145 * .is_prepared is optional for clocks that can prepare;
146 * fall back to the software usage counter if it is missing
147 */
148 if (!core->ops->is_prepared)
149 return core->prepare_count;
b2476490 150
4dff95dc
SB
151 return core->ops->is_prepared(core->hw);
152}
b2476490 153
4dff95dc
SB
154static bool clk_core_is_enabled(struct clk_core *core)
155{
156 /*
157 * .is_enabled is only mandatory for clocks that gate;
158 * fall back to the software usage counter if .is_enabled is missing
159 */
160 if (!core->ops->is_enabled)
161 return core->enable_count;
6b44c854 162
4dff95dc
SB
163 return core->ops->is_enabled(core->hw);
164}
6b44c854 165
4dff95dc 166static void clk_unprepare_unused_subtree(struct clk_core *core)
1af599df 167{
4dff95dc
SB
168 struct clk_core *child;
169
170 lockdep_assert_held(&prepare_lock);
171
172 hlist_for_each_entry(child, &core->children, child_node)
173 clk_unprepare_unused_subtree(child);
174
175 if (core->prepare_count)
1af599df
PG
176 return;
177
4dff95dc
SB
178 if (core->flags & CLK_IGNORE_UNUSED)
179 return;
180
181 if (clk_core_is_prepared(core)) {
182 trace_clk_unprepare(core);
183 if (core->ops->unprepare_unused)
184 core->ops->unprepare_unused(core->hw);
185 else if (core->ops->unprepare)
186 core->ops->unprepare(core->hw);
187 trace_clk_unprepare_complete(core);
188 }
1af599df
PG
189}
190
4dff95dc 191static void clk_disable_unused_subtree(struct clk_core *core)
1af599df 192{
035a61c3 193 struct clk_core *child;
4dff95dc 194 unsigned long flags;
1af599df 195
4dff95dc 196 lockdep_assert_held(&prepare_lock);
1af599df 197
4dff95dc
SB
198 hlist_for_each_entry(child, &core->children, child_node)
199 clk_disable_unused_subtree(child);
1af599df 200
4dff95dc
SB
201 flags = clk_enable_lock();
202
203 if (core->enable_count)
204 goto unlock_out;
205
206 if (core->flags & CLK_IGNORE_UNUSED)
207 goto unlock_out;
208
209 /*
210 * some gate clocks have special needs during the disable-unused
211 * sequence. call .disable_unused if available, otherwise fall
212 * back to .disable
213 */
214 if (clk_core_is_enabled(core)) {
215 trace_clk_disable(core);
216 if (core->ops->disable_unused)
217 core->ops->disable_unused(core->hw);
218 else if (core->ops->disable)
219 core->ops->disable(core->hw);
220 trace_clk_disable_complete(core);
221 }
222
223unlock_out:
224 clk_enable_unlock(flags);
1af599df
PG
225}
226
4dff95dc
SB
227static bool clk_ignore_unused;
228static int __init clk_ignore_unused_setup(char *__unused)
1af599df 229{
4dff95dc
SB
230 clk_ignore_unused = true;
231 return 1;
232}
233__setup("clk_ignore_unused", clk_ignore_unused_setup);
1af599df 234
4dff95dc
SB
235static int clk_disable_unused(void)
236{
237 struct clk_core *core;
238
239 if (clk_ignore_unused) {
240 pr_warn("clk: Not disabling unused clocks\n");
241 return 0;
242 }
1af599df 243
eab89f69 244 clk_prepare_lock();
1af599df 245
4dff95dc
SB
246 hlist_for_each_entry(core, &clk_root_list, child_node)
247 clk_disable_unused_subtree(core);
248
249 hlist_for_each_entry(core, &clk_orphan_list, child_node)
250 clk_disable_unused_subtree(core);
251
252 hlist_for_each_entry(core, &clk_root_list, child_node)
253 clk_unprepare_unused_subtree(core);
254
255 hlist_for_each_entry(core, &clk_orphan_list, child_node)
256 clk_unprepare_unused_subtree(core);
1af599df 257
eab89f69 258 clk_prepare_unlock();
1af599df
PG
259
260 return 0;
261}
4dff95dc 262late_initcall_sync(clk_disable_unused);
1af599df 263
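/*
 * Illustrative usage note (not from the original source): booting with
 * the parameter parsed by clk_ignore_unused_setup() above, i.e.
 *
 *	clk_ignore_unused
 *
 * on the kernel command line, skips this late gating pass. That can be
 * handy while debugging a driver that silently depends on a clock the
 * bootloader left running.
 */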
4dff95dc 264/*** helper functions ***/
1af599df 265
4dff95dc 266const char *__clk_get_name(struct clk *clk)
1af599df 267{
4dff95dc 268 return !clk ? NULL : clk->core->name;
1af599df 269}
4dff95dc 270EXPORT_SYMBOL_GPL(__clk_get_name);
1af599df 271
4dff95dc
SB
272struct clk_hw *__clk_get_hw(struct clk *clk)
273{
274 return !clk ? NULL : clk->core->hw;
275}
276EXPORT_SYMBOL_GPL(__clk_get_hw);
1af599df 277
4dff95dc 278u8 __clk_get_num_parents(struct clk *clk)
bddca894 279{
4dff95dc
SB
280 return !clk ? 0 : clk->core->num_parents;
281}
282EXPORT_SYMBOL_GPL(__clk_get_num_parents);
bddca894 283
4dff95dc
SB
284struct clk *__clk_get_parent(struct clk *clk)
285{
286 if (!clk)
287 return NULL;
288
289 /* TODO: Create a per-user clk and change callers to call clk_put */
290 return !clk->core->parent ? NULL : clk->core->parent->hw->clk;
bddca894 291}
4dff95dc 292EXPORT_SYMBOL_GPL(__clk_get_parent);
bddca894 293
4dff95dc
SB
294static struct clk_core *__clk_lookup_subtree(const char *name,
295 struct clk_core *core)
bddca894 296{
035a61c3 297 struct clk_core *child;
4dff95dc 298 struct clk_core *ret;
bddca894 299
4dff95dc
SB
300 if (!strcmp(core->name, name))
301 return core;
bddca894 302
4dff95dc
SB
303 hlist_for_each_entry(child, &core->children, child_node) {
304 ret = __clk_lookup_subtree(name, child);
305 if (ret)
306 return ret;
bddca894
PG
307 }
308
4dff95dc 309 return NULL;
bddca894
PG
310}
311
4dff95dc 312static struct clk_core *clk_core_lookup(const char *name)
bddca894 313{
4dff95dc
SB
314 struct clk_core *root_clk;
315 struct clk_core *ret;
bddca894 316
4dff95dc
SB
317 if (!name)
318 return NULL;
bddca894 319
4dff95dc
SB
320 /* search the 'proper' clk tree first */
321 hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
322 ret = __clk_lookup_subtree(name, root_clk);
323 if (ret)
324 return ret;
bddca894
PG
325 }
326
4dff95dc
SB
327 /* if not found, then search the orphan tree */
328 hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
329 ret = __clk_lookup_subtree(name, root_clk);
330 if (ret)
331 return ret;
332 }
bddca894 333
4dff95dc 334 return NULL;
bddca894
PG
335}
336
4dff95dc
SB
337static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
338 u8 index)
bddca894 339{
4dff95dc
SB
340 if (!core || index >= core->num_parents)
341 return NULL;
342 else if (!core->parents)
343 return clk_core_lookup(core->parent_names[index]);
344 else if (!core->parents[index])
345 return core->parents[index] =
346 clk_core_lookup(core->parent_names[index]);
347 else
348 return core->parents[index];
bddca894
PG
349}
350
4dff95dc 351struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
b2476490 352{
4dff95dc 353 struct clk_core *parent;
b2476490 354
4dff95dc
SB
355 if (!clk)
356 return NULL;
b2476490 357
4dff95dc 358 parent = clk_core_get_parent_by_index(clk->core, index);
5279fc40 359
4dff95dc
SB
360 return !parent ? NULL : parent->hw->clk;
361}
362EXPORT_SYMBOL_GPL(clk_get_parent_by_index);
e59c5371 363
4dff95dc
SB
364unsigned int __clk_get_enable_count(struct clk *clk)
365{
366 return !clk ? 0 : clk->core->enable_count;
367}
b2476490 368
4dff95dc
SB
369static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
370{
371 unsigned long ret;
b2476490 372
4dff95dc
SB
373 if (!core) {
374 ret = 0;
375 goto out;
376 }
b2476490 377
4dff95dc 378 ret = core->rate;
b2476490 379
4dff95dc
SB
380 if (core->flags & CLK_IS_ROOT)
381 goto out;
c646cbf1 382
4dff95dc
SB
383 if (!core->parent)
384 ret = 0;
b2476490 385
b2476490
MT
386out:
387 return ret;
388}
389
4dff95dc 390unsigned long __clk_get_rate(struct clk *clk)
b2476490 391{
4dff95dc
SB
392 if (!clk)
393 return 0;
6314b679 394
4dff95dc
SB
395 return clk_core_get_rate_nolock(clk->core);
396}
397EXPORT_SYMBOL_GPL(__clk_get_rate);
b2476490 398
4dff95dc
SB
399static unsigned long __clk_get_accuracy(struct clk_core *core)
400{
401 if (!core)
402 return 0;
b2476490 403
4dff95dc 404 return core->accuracy;
b2476490
MT
405}
406
4dff95dc 407unsigned long __clk_get_flags(struct clk *clk)
fcb0ee6a 408{
4dff95dc 409 return !clk ? 0 : clk->core->flags;
fcb0ee6a 410}
4dff95dc 411EXPORT_SYMBOL_GPL(__clk_get_flags);
fcb0ee6a 412
4dff95dc 413bool __clk_is_prepared(struct clk *clk)
fb2b3c9f 414{
4dff95dc
SB
415 if (!clk)
416 return false;
fb2b3c9f 417
4dff95dc 418 return clk_core_is_prepared(clk->core);
fb2b3c9f 419}
fb2b3c9f 420
4dff95dc 421bool __clk_is_enabled(struct clk *clk)
b2476490 422{
4dff95dc
SB
423 if (!clk)
424 return false;
b2476490 425
4dff95dc
SB
426 return clk_core_is_enabled(clk->core);
427}
428EXPORT_SYMBOL_GPL(__clk_is_enabled);
b2476490 429
4dff95dc
SB
430static bool mux_is_better_rate(unsigned long rate, unsigned long now,
431 unsigned long best, unsigned long flags)
432{
433 if (flags & CLK_MUX_ROUND_CLOSEST)
434 return abs(now - rate) < abs(best - rate);
1af599df 435
4dff95dc
SB
436 return now <= rate && now > best;
437}
bddca894 438
0817b62c
BB
439static int
440clk_mux_determine_rate_flags(struct clk_hw *hw, struct clk_rate_request *req,
4dff95dc
SB
441 unsigned long flags)
442{
443 struct clk_core *core = hw->core, *parent, *best_parent = NULL;
0817b62c
BB
444 int i, num_parents, ret;
445 unsigned long best = 0;
446 struct clk_rate_request parent_req = *req;
b2476490 447
4dff95dc
SB
448 /* if NO_REPARENT flag set, pass through to current parent */
449 if (core->flags & CLK_SET_RATE_NO_REPARENT) {
450 parent = core->parent;
0817b62c
BB
451 if (core->flags & CLK_SET_RATE_PARENT) {
452 ret = __clk_determine_rate(parent ? parent->hw : NULL,
453 &parent_req);
454 if (ret)
455 return ret;
456
457 best = parent_req.rate;
458 } else if (parent) {
4dff95dc 459 best = clk_core_get_rate_nolock(parent);
0817b62c 460 } else {
4dff95dc 461 best = clk_core_get_rate_nolock(core);
0817b62c
BB
462 }
463
4dff95dc
SB
464 goto out;
465 }
b2476490 466
4dff95dc
SB
467 /* find the parent that can provide the fastest rate <= rate */
468 num_parents = core->num_parents;
469 for (i = 0; i < num_parents; i++) {
470 parent = clk_core_get_parent_by_index(core, i);
471 if (!parent)
472 continue;
0817b62c
BB
473
474 if (core->flags & CLK_SET_RATE_PARENT) {
475 parent_req = *req;
476 ret = __clk_determine_rate(parent->hw, &parent_req);
477 if (ret)
478 continue;
479 } else {
480 parent_req.rate = clk_core_get_rate_nolock(parent);
481 }
482
483 if (mux_is_better_rate(req->rate, parent_req.rate,
484 best, flags)) {
4dff95dc 485 best_parent = parent;
0817b62c 486 best = parent_req.rate;
4dff95dc
SB
487 }
488 }
b2476490 489
57d866e6
BB
490 if (!best_parent)
491 return -EINVAL;
492
4dff95dc
SB
493out:
494 if (best_parent)
0817b62c
BB
495 req->best_parent_hw = best_parent->hw;
496 req->best_parent_rate = best;
497 req->rate = best;
b2476490 498
0817b62c 499 return 0;
b33d212f 500}
4dff95dc
SB
501
502struct clk *__clk_lookup(const char *name)
fcb0ee6a 503{
4dff95dc
SB
504 struct clk_core *core = clk_core_lookup(name);
505
506 return !core ? NULL : core->hw->clk;
fcb0ee6a 507}
b2476490 508
4dff95dc
SB
509static void clk_core_get_boundaries(struct clk_core *core,
510 unsigned long *min_rate,
511 unsigned long *max_rate)
1c155b3d 512{
4dff95dc 513 struct clk *clk_user;
1c155b3d 514
4dff95dc
SB
515 *min_rate = 0;
516 *max_rate = ULONG_MAX;
496eadf8 517
4dff95dc
SB
518 hlist_for_each_entry(clk_user, &core->clks, clks_node)
519 *min_rate = max(*min_rate, clk_user->min_rate);
1c155b3d 520
4dff95dc
SB
521 hlist_for_each_entry(clk_user, &core->clks, clks_node)
522 *max_rate = min(*max_rate, clk_user->max_rate);
523}
1c155b3d 524
4dff95dc
SB
525/*
526 * Helper for finding best parent to provide a given frequency. This can be used
527 * directly as a determine_rate callback (e.g. for a mux), or from a more
528 * complex clock that may combine a mux with other operations.
529 */
0817b62c
BB
530int __clk_mux_determine_rate(struct clk_hw *hw,
531 struct clk_rate_request *req)
4dff95dc 532{
0817b62c 533 return clk_mux_determine_rate_flags(hw, req, 0);
1c155b3d 534}
4dff95dc 535EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
1c155b3d 536
0817b62c
BB
537int __clk_mux_determine_rate_closest(struct clk_hw *hw,
538 struct clk_rate_request *req)
b2476490 539{
0817b62c 540 return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
4dff95dc
SB
541}
542EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
b2476490 543
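/*
 * Illustrative provider sketch (not from the original source): a basic
 * mux can point .determine_rate at one of the helpers above and let the
 * core pick the best parent. The two register callbacks below are
 * hypothetical stubs standing in for real hardware accessors.
 */
static u8 example_mux_get_parent(struct clk_hw *hw)
{
	return 0;	/* a real mux would read its parent-select field */
}

static int example_mux_set_parent(struct clk_hw *hw, u8 index)
{
	return 0;	/* a real mux would write its parent-select field */
}

static const struct clk_ops example_mux_ops = {
	.get_parent	= example_mux_get_parent,
	.set_parent	= example_mux_set_parent,
	.determine_rate	= __clk_mux_determine_rate,
};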
4dff95dc 544/*** clk api ***/
496eadf8 545
4dff95dc
SB
546static void clk_core_unprepare(struct clk_core *core)
547{
a6334725
SB
548 lockdep_assert_held(&prepare_lock);
549
4dff95dc
SB
550 if (!core)
551 return;
b2476490 552
4dff95dc
SB
553 if (WARN_ON(core->prepare_count == 0))
554 return;
b2476490 555
4dff95dc
SB
556 if (--core->prepare_count > 0)
557 return;
b2476490 558
4dff95dc 559 WARN_ON(core->enable_count > 0);
b2476490 560
4dff95dc 561 trace_clk_unprepare(core);
b2476490 562
4dff95dc
SB
563 if (core->ops->unprepare)
564 core->ops->unprepare(core->hw);
565
566 trace_clk_unprepare_complete(core);
567 clk_core_unprepare(core->parent);
b2476490
MT
568}
569
4dff95dc
SB
570/**
571 * clk_unprepare - undo preparation of a clock source
572 * @clk: the clk being unprepared
573 *
574 * clk_unprepare may sleep, which differentiates it from clk_disable. In a
575 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
576 * if the operation may sleep. One example is a clk which is accessed over
577 * I2C. In the complex case a clk gate operation may require a fast and a slow
578 * part. It is for this reason that clk_unprepare and clk_disable are not mutually
579 * exclusive. In fact clk_disable must be called before clk_unprepare.
580 */
581void clk_unprepare(struct clk *clk)
1e435256 582{
4dff95dc
SB
583 if (IS_ERR_OR_NULL(clk))
584 return;
585
586 clk_prepare_lock();
587 clk_core_unprepare(clk->core);
588 clk_prepare_unlock();
1e435256 589}
4dff95dc 590EXPORT_SYMBOL_GPL(clk_unprepare);
1e435256 591
4dff95dc 592static int clk_core_prepare(struct clk_core *core)
b2476490 593{
4dff95dc 594 int ret = 0;
b2476490 595
a6334725
SB
596 lockdep_assert_held(&prepare_lock);
597
4dff95dc 598 if (!core)
1e435256 599 return 0;
1e435256 600
4dff95dc
SB
601 if (core->prepare_count == 0) {
602 ret = clk_core_prepare(core->parent);
603 if (ret)
604 return ret;
b2476490 605
4dff95dc 606 trace_clk_prepare(core);
b2476490 607
4dff95dc
SB
608 if (core->ops->prepare)
609 ret = core->ops->prepare(core->hw);
b2476490 610
4dff95dc 611 trace_clk_prepare_complete(core);
1c155b3d 612
4dff95dc
SB
613 if (ret) {
614 clk_core_unprepare(core->parent);
615 return ret;
616 }
617 }
1c155b3d 618
4dff95dc 619 core->prepare_count++;
b2476490
MT
620
621 return 0;
622}
b2476490 623
4dff95dc
SB
624/**
625 * clk_prepare - prepare a clock source
626 * @clk: the clk being prepared
627 *
628 * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
629 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
630 * operation may sleep. One example is a clk which is accessed over I2C. In
631 * the complex case a clk ungate operation may require a fast and a slow part.
632 * It is for this reason that clk_prepare and clk_enable are not mutually
633 * exclusive. In fact clk_prepare must be called before clk_enable.
634 * Returns 0 on success, a negative errno otherwise.
635 */
636int clk_prepare(struct clk *clk)
b2476490 637{
4dff95dc 638 int ret;
b2476490 639
4dff95dc
SB
640 if (!clk)
641 return 0;
b2476490 642
4dff95dc
SB
643 clk_prepare_lock();
644 ret = clk_core_prepare(clk->core);
645 clk_prepare_unlock();
646
647 return ret;
b2476490 648}
4dff95dc 649EXPORT_SYMBOL_GPL(clk_prepare);
b2476490 650
4dff95dc 651static void clk_core_disable(struct clk_core *core)
b2476490 652{
a6334725
SB
653 lockdep_assert_held(&enable_lock);
654
4dff95dc
SB
655 if (!core)
656 return;
035a61c3 657
4dff95dc
SB
658 if (WARN_ON(core->enable_count == 0))
659 return;
b2476490 660
4dff95dc
SB
661 if (--core->enable_count > 0)
662 return;
035a61c3 663
4dff95dc 664 trace_clk_disable(core);
035a61c3 665
4dff95dc
SB
666 if (core->ops->disable)
667 core->ops->disable(core->hw);
035a61c3 668
4dff95dc 669 trace_clk_disable_complete(core);
035a61c3 670
4dff95dc 671 clk_core_disable(core->parent);
035a61c3 672}
7ef3dcc8 673
4dff95dc
SB
674/**
675 * clk_disable - gate a clock
676 * @clk: the clk being gated
677 *
678 * clk_disable must not sleep, which differentiates it from clk_unprepare. In
679 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
680 * clk if the operation is fast and will never sleep. One example is a
681 * SoC-internal clk which is controlled via simple register writes. In the
682 * complex case a clk gate operation may require a fast and a slow part. It is
683 * for this reason that clk_unprepare and clk_disable are not mutually exclusive.
684 * In fact clk_disable must be called before clk_unprepare.
685 */
686void clk_disable(struct clk *clk)
b2476490 687{
4dff95dc
SB
688 unsigned long flags;
689
690 if (IS_ERR_OR_NULL(clk))
691 return;
692
693 flags = clk_enable_lock();
694 clk_core_disable(clk->core);
695 clk_enable_unlock(flags);
b2476490 696}
4dff95dc 697EXPORT_SYMBOL_GPL(clk_disable);
b2476490 698
4dff95dc 699static int clk_core_enable(struct clk_core *core)
b2476490 700{
4dff95dc 701 int ret = 0;
b2476490 702
a6334725
SB
703 lockdep_assert_held(&enable_lock);
704
4dff95dc
SB
705 if (!core)
706 return 0;
b2476490 707
4dff95dc
SB
708 if (WARN_ON(core->prepare_count == 0))
709 return -ESHUTDOWN;
b2476490 710
4dff95dc
SB
711 if (core->enable_count == 0) {
712 ret = clk_core_enable(core->parent);
b2476490 713
4dff95dc
SB
714 if (ret)
715 return ret;
b2476490 716
4dff95dc 717 trace_clk_enable(core);
035a61c3 718
4dff95dc
SB
719 if (core->ops->enable)
720 ret = core->ops->enable(core->hw);
035a61c3 721
4dff95dc
SB
722 trace_clk_enable_complete(core);
723
724 if (ret) {
725 clk_core_disable(core->parent);
726 return ret;
727 }
728 }
729
730 core->enable_count++;
731 return 0;
035a61c3 732}
b2476490 733
4dff95dc
SB
734/**
735 * clk_enable - ungate a clock
736 * @clk: the clk being ungated
737 *
738 * clk_enable must not sleep, which differentiates it from clk_prepare. In a
739 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
740 * if the operation will never sleep. One example is a SoC-internal clk which
741 * is controlled via simple register writes. In the complex case a clk ungate
742 * operation may require a fast and a slow part. It is for this reason that
743 * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
744 * must be called before clk_enable. Returns 0 on success, a negative errno
745 * otherwise.
746 */
747int clk_enable(struct clk *clk)
5279fc40 748{
4dff95dc
SB
749 unsigned long flags;
750 int ret;
751
752 if (!clk)
5279fc40
BB
753 return 0;
754
4dff95dc
SB
755 flags = clk_enable_lock();
756 ret = clk_core_enable(clk->core);
757 clk_enable_unlock(flags);
5279fc40 758
4dff95dc 759 return ret;
b2476490 760}
4dff95dc 761EXPORT_SYMBOL_GPL(clk_enable);
b2476490 762
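/*
 * Illustrative consumer sketch (not from the original source): how the
 * prepare/enable pairs documented above are meant to be used together.
 * The helper names are hypothetical.
 */
static int example_clk_power_on(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);		/* sleepable step first */
	if (ret)
		return ret;

	ret = clk_enable(clk);		/* atomic step second */
	if (ret)
		clk_unprepare(clk);

	return ret;
}

static void example_clk_power_off(struct clk *clk)
{
	clk_disable(clk);		/* must precede clk_unprepare */
	clk_unprepare(clk);
}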
0817b62c
BB
763static int clk_core_round_rate_nolock(struct clk_core *core,
764 struct clk_rate_request *req)
3d6ee287 765{
4dff95dc 766 struct clk_core *parent;
0817b62c 767 long rate;
4dff95dc
SB
768
769 lockdep_assert_held(&prepare_lock);
3d6ee287 770
d6968fca 771 if (!core)
4dff95dc 772 return 0;
3d6ee287 773
4dff95dc 774 parent = core->parent;
0817b62c
BB
775 if (parent) {
776 req->best_parent_hw = parent->hw;
777 req->best_parent_rate = parent->rate;
778 } else {
779 req->best_parent_hw = NULL;
780 req->best_parent_rate = 0;
781 }
3d6ee287 782
4dff95dc 783 if (core->ops->determine_rate) {
0817b62c
BB
784 return core->ops->determine_rate(core->hw, req);
785 } else if (core->ops->round_rate) {
786 rate = core->ops->round_rate(core->hw, req->rate,
787 &req->best_parent_rate);
788 if (rate < 0)
789 return rate;
790
791 req->rate = rate;
792 } else if (core->flags & CLK_SET_RATE_PARENT) {
793 return clk_core_round_rate_nolock(parent, req);
794 } else {
795 req->rate = core->rate;
796 }
797
798 return 0;
3d6ee287
UH
799}
800
4dff95dc
SB
801/**
802 * __clk_determine_rate - get the closest rate actually supported by a clock
803 * @hw: determine the rate of this clock
804 * @req: rate request; carries the target rate and min/max boundaries in,
805 * and the chosen rate and best parent out
807 *
6e5ab41b 808 * Useful for clk_ops such as .set_rate and .determine_rate.
4dff95dc 809 */
0817b62c 810int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
035a61c3 811{
0817b62c
BB
812 if (!hw) {
813 req->rate = 0;
4dff95dc 814 return 0;
0817b62c 815 }
035a61c3 816
0817b62c 817 return clk_core_round_rate_nolock(hw->core, req);
035a61c3 818}
4dff95dc 819EXPORT_SYMBOL_GPL(__clk_determine_rate);
035a61c3 820
4dff95dc
SB
821/**
822 * __clk_round_rate - round the given rate for a clk
823 * @clk: round the rate of this clock
824 * @rate: the rate which is to be rounded
825 *
6e5ab41b 826 * Useful for clk_ops such as .set_rate
4dff95dc
SB
827 */
828unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
b2476490 829{
0817b62c
BB
830 struct clk_rate_request req;
831 int ret;
b2476490 832
4dff95dc
SB
833 if (!clk)
834 return 0;
b2476490 835
0817b62c
BB
836 clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
837 req.rate = rate;
838
839 ret = clk_core_round_rate_nolock(clk->core, &req);
840 if (ret)
841 return 0;
b2476490 842
0817b62c 843 return req.rate;
b2476490 844}
4dff95dc 845EXPORT_SYMBOL_GPL(__clk_round_rate);
035a61c3 846
4dff95dc
SB
847/**
848 * clk_round_rate - round the given rate for a clk
849 * @clk: the clk for which we are rounding a rate
850 * @rate: the rate which is to be rounded
851 *
852 * Takes in a rate as input and rounds it to a rate that the clk can actually
853 * use, which is then returned. If clk doesn't support the round_rate operation
854 * then the clk's own rate is returned, or its parent's when CLK_SET_RATE_PARENT is set.
855 */
856long clk_round_rate(struct clk *clk, unsigned long rate)
035a61c3 857{
4dff95dc
SB
858 unsigned long ret;
859
035a61c3 860 if (!clk)
4dff95dc 861 return 0;
035a61c3 862
4dff95dc
SB
863 clk_prepare_lock();
864 ret = __clk_round_rate(clk, rate);
865 clk_prepare_unlock();
866
867 return ret;
035a61c3 868}
4dff95dc 869EXPORT_SYMBOL_GPL(clk_round_rate);
b2476490 870
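/*
 * Illustrative consumer sketch (not from the original source): probe
 * what the hardware can actually deliver before committing to it. The
 * 48 MHz target is hypothetical.
 */
static int example_request_48mhz(struct clk *clk)
{
	long rounded = clk_round_rate(clk, 48000000);

	if (rounded <= 0)
		return -ERANGE;

	return clk_set_rate(clk, rounded);
}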
4dff95dc
SB
871/**
872 * __clk_notify - call clk notifier chain
873 * @core: clk that is changing rate
874 * @msg: clk notifier type (see include/linux/clk.h)
875 * @old_rate: old clk rate
876 * @new_rate: new clk rate
877 *
878 * Triggers a notifier call chain on the clk rate-change notification
879 * for 'clk'. Passes a pointer to the struct clk and the previous
880 * and current rates to the notifier callback. Intended to be called by
881 * internal clock code only. Returns NOTIFY_DONE from the last driver
882 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
883 * a driver returns that.
884 */
885static int __clk_notify(struct clk_core *core, unsigned long msg,
886 unsigned long old_rate, unsigned long new_rate)
b2476490 887{
4dff95dc
SB
888 struct clk_notifier *cn;
889 struct clk_notifier_data cnd;
890 int ret = NOTIFY_DONE;
b2476490 891
4dff95dc
SB
892 cnd.old_rate = old_rate;
893 cnd.new_rate = new_rate;
b2476490 894
4dff95dc
SB
895 list_for_each_entry(cn, &clk_notifier_list, node) {
896 if (cn->clk->core == core) {
897 cnd.clk = cn->clk;
898 ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
899 &cnd);
900 }
b2476490
MT
901 }
902
4dff95dc 903 return ret;
b2476490
MT
904}
905
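/*
 * Illustrative consumer-side callback (not from the original source):
 * a driver that subscribed with clk_notifier_register() could react to
 * the messages sent by __clk_notify() like this. The 100 MHz ceiling
 * is hypothetical.
 */
static int example_clk_notifier_cb(struct notifier_block *nb,
				   unsigned long msg, void *data)
{
	struct clk_notifier_data *cnd = data;

	switch (msg) {
	case PRE_RATE_CHANGE:
		/* veto rates this device could not follow */
		if (cnd->new_rate > 100000000)
			return NOTIFY_BAD;
		return NOTIFY_OK;
	case POST_RATE_CHANGE:
		/* reprogram device timings against cnd->new_rate here */
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}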
4dff95dc
SB
906/**
907 * __clk_recalc_accuracies
908 * @core: first clk in the subtree
909 *
910 * Walks the subtree of clks starting with clk and recalculates accuracies as
911 * it goes. Note that if a clk does not implement the .recalc_accuracy
6e5ab41b 912 * callback then it is assumed that the clock will take on the accuracy of its
4dff95dc 913 * parent.
4dff95dc
SB
914 */
915static void __clk_recalc_accuracies(struct clk_core *core)
b2476490 916{
4dff95dc
SB
917 unsigned long parent_accuracy = 0;
918 struct clk_core *child;
b2476490 919
4dff95dc 920 lockdep_assert_held(&prepare_lock);
b2476490 921
4dff95dc
SB
922 if (core->parent)
923 parent_accuracy = core->parent->accuracy;
b2476490 924
4dff95dc
SB
925 if (core->ops->recalc_accuracy)
926 core->accuracy = core->ops->recalc_accuracy(core->hw,
927 parent_accuracy);
928 else
929 core->accuracy = parent_accuracy;
b2476490 930
4dff95dc
SB
931 hlist_for_each_entry(child, &core->children, child_node)
932 __clk_recalc_accuracies(child);
b2476490
MT
933}
934
4dff95dc 935static long clk_core_get_accuracy(struct clk_core *core)
e366fdd7 936{
4dff95dc 937 unsigned long accuracy;
15a02c1f 938
4dff95dc
SB
939 clk_prepare_lock();
940 if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
941 __clk_recalc_accuracies(core);
15a02c1f 942
4dff95dc
SB
943 accuracy = __clk_get_accuracy(core);
944 clk_prepare_unlock();
e366fdd7 945
4dff95dc 946 return accuracy;
e366fdd7 947}
15a02c1f 948
4dff95dc
SB
949/**
950 * clk_get_accuracy - return the accuracy of clk
951 * @clk: the clk whose accuracy is being returned
952 *
953 * Simply returns the cached accuracy of the clk, unless
954 * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_rate will be
955 * issued.
956 * If clk is NULL then returns 0.
957 */
958long clk_get_accuracy(struct clk *clk)
035a61c3 959{
4dff95dc
SB
960 if (!clk)
961 return 0;
035a61c3 962
4dff95dc 963 return clk_core_get_accuracy(clk->core);
035a61c3 964}
4dff95dc 965EXPORT_SYMBOL_GPL(clk_get_accuracy);
035a61c3 966
4dff95dc
SB
967static unsigned long clk_recalc(struct clk_core *core,
968 unsigned long parent_rate)
1c8e6004 969{
4dff95dc
SB
970 if (core->ops->recalc_rate)
971 return core->ops->recalc_rate(core->hw, parent_rate);
972 return parent_rate;
1c8e6004
TV
973}
974
4dff95dc
SB
975/**
976 * __clk_recalc_rates
977 * @core: first clk in the subtree
978 * @msg: notification type (see include/linux/clk.h)
979 *
980 * Walks the subtree of clks starting with clk and recalculates rates as it
981 * goes. Note that if a clk does not implement the .recalc_rate callback then
982 * it is assumed that the clock will take on the rate of its parent.
983 *
984 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
985 * if necessary.
15a02c1f 986 */
4dff95dc 987static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
15a02c1f 988{
4dff95dc
SB
989 unsigned long old_rate;
990 unsigned long parent_rate = 0;
991 struct clk_core *child;
e366fdd7 992
4dff95dc 993 lockdep_assert_held(&prepare_lock);
15a02c1f 994
4dff95dc 995 old_rate = core->rate;
b2476490 996
4dff95dc
SB
997 if (core->parent)
998 parent_rate = core->parent->rate;
b2476490 999
4dff95dc 1000 core->rate = clk_recalc(core, parent_rate);
b2476490 1001
4dff95dc
SB
1002 /*
1003 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
1004 * & ABORT_RATE_CHANGE notifiers
1005 */
1006 if (core->notifier_count && msg)
1007 __clk_notify(core, msg, old_rate, core->rate);
b2476490 1008
4dff95dc
SB
1009 hlist_for_each_entry(child, &core->children, child_node)
1010 __clk_recalc_rates(child, msg);
1011}
b2476490 1012
4dff95dc
SB
1013static unsigned long clk_core_get_rate(struct clk_core *core)
1014{
1015 unsigned long rate;
dfc202ea 1016
4dff95dc 1017 clk_prepare_lock();
b2476490 1018
4dff95dc
SB
1019 if (core && (core->flags & CLK_GET_RATE_NOCACHE))
1020 __clk_recalc_rates(core, 0);
1021
1022 rate = clk_core_get_rate_nolock(core);
1023 clk_prepare_unlock();
1024
1025 return rate;
b2476490
MT
1026}
1027
1028/**
4dff95dc
SB
1029 * clk_get_rate - return the rate of clk
1030 * @clk: the clk whose rate is being returned
b2476490 1031 *
4dff95dc
SB
1032 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
1033 * is set, which means a recalc_rate will be issued.
1034 * If clk is NULL then returns 0.
b2476490 1035 */
4dff95dc 1036unsigned long clk_get_rate(struct clk *clk)
b2476490 1037{
4dff95dc
SB
1038 if (!clk)
1039 return 0;
63589e92 1040
4dff95dc 1041 return clk_core_get_rate(clk->core);
b2476490 1042}
4dff95dc 1043EXPORT_SYMBOL_GPL(clk_get_rate);
b2476490 1044
4dff95dc
SB
1045static int clk_fetch_parent_index(struct clk_core *core,
1046 struct clk_core *parent)
b2476490 1047{
4dff95dc 1048 int i;
b2476490 1049
4dff95dc
SB
1050 if (!core->parents) {
1051 core->parents = kcalloc(core->num_parents,
1052 sizeof(struct clk *), GFP_KERNEL);
1053 if (!core->parents)
1054 return -ENOMEM;
1055 }
dfc202ea 1056
4dff95dc
SB
1057 /*
1058 * find index of new parent clock using cached parent ptrs,
1059 * or if not yet cached, use string name comparison and cache
1060 * them now to avoid future calls to clk_core_lookup.
1061 */
1062 for (i = 0; i < core->num_parents; i++) {
1063 if (core->parents[i] == parent)
1064 return i;
dfc202ea 1065
4dff95dc
SB
1066 if (core->parents[i])
1067 continue;
dfc202ea 1068
4dff95dc
SB
1069 if (!strcmp(core->parent_names[i], parent->name)) {
1070 core->parents[i] = clk_core_lookup(parent->name);
1071 return i;
b2476490
MT
1072 }
1073 }
1074
4dff95dc 1075 return -EINVAL;
b2476490
MT
1076}
1077
4dff95dc 1078static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
b2476490 1079{
4dff95dc 1080 hlist_del(&core->child_node);
035a61c3 1081
4dff95dc
SB
1082 if (new_parent) {
1083 /* avoid duplicate POST_RATE_CHANGE notifications */
1084 if (new_parent->new_child == core)
1085 new_parent->new_child = NULL;
b2476490 1086
4dff95dc
SB
1087 hlist_add_head(&core->child_node, &new_parent->children);
1088 } else {
1089 hlist_add_head(&core->child_node, &clk_orphan_list);
1090 }
dfc202ea 1091
4dff95dc 1092 core->parent = new_parent;
035a61c3
TV
1093}
1094
4dff95dc
SB
1095static struct clk_core *__clk_set_parent_before(struct clk_core *core,
1096 struct clk_core *parent)
b2476490
MT
1097{
1098 unsigned long flags;
4dff95dc 1099 struct clk_core *old_parent = core->parent;
b2476490 1100
4dff95dc
SB
1101 /*
1102 * Migrate prepare state between parents and prevent race with
1103 * clk_enable().
1104 *
1105 * If the clock is not prepared, then a race with
1106 * clk_enable/disable() is impossible since we already have the
1107 * prepare lock (future calls to clk_enable() need to be preceded by
1108 * a clk_prepare()).
1109 *
1110 * If the clock is prepared, migrate the prepared state to the new
1111 * parent and also protect against a race with clk_enable() by
1112 * forcing the clock and the new parent on. This ensures that all
1113 * future calls to clk_enable() are practically NOPs with respect to
1114 * hardware and software states.
1115 *
1116 * See also: Comment for clk_set_parent() below.
1117 */
1118 if (core->prepare_count) {
1119 clk_core_prepare(parent);
d2a5d46b 1120 flags = clk_enable_lock();
4dff95dc
SB
1121 clk_core_enable(parent);
1122 clk_core_enable(core);
d2a5d46b 1123 clk_enable_unlock(flags);
4dff95dc 1124 }
63589e92 1125
4dff95dc 1126 /* update the clk tree topology */
eab89f69 1127 flags = clk_enable_lock();
4dff95dc 1128 clk_reparent(core, parent);
eab89f69 1129 clk_enable_unlock(flags);
4dff95dc
SB
1130
1131 return old_parent;
b2476490 1132}
b2476490 1133
4dff95dc
SB
1134static void __clk_set_parent_after(struct clk_core *core,
1135 struct clk_core *parent,
1136 struct clk_core *old_parent)
b2476490 1137{
d2a5d46b
DA
1138 unsigned long flags;
1139
4dff95dc
SB
1140 /*
1141 * Finish the migration of prepare state and undo the changes done
1142 * for preventing a race with clk_enable().
1143 */
1144 if (core->prepare_count) {
d2a5d46b 1145 flags = clk_enable_lock();
4dff95dc
SB
1146 clk_core_disable(core);
1147 clk_core_disable(old_parent);
d2a5d46b 1148 clk_enable_unlock(flags);
4dff95dc
SB
1149 clk_core_unprepare(old_parent);
1150 }
1151}
b2476490 1152
4dff95dc
SB
1153static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
1154 u8 p_index)
1155{
1156 unsigned long flags;
1157 int ret = 0;
1158 struct clk_core *old_parent;
b2476490 1159
4dff95dc 1160 old_parent = __clk_set_parent_before(core, parent);
b2476490 1161
4dff95dc 1162 trace_clk_set_parent(core, parent);
b2476490 1163
4dff95dc
SB
1164 /* change clock input source */
1165 if (parent && core->ops->set_parent)
1166 ret = core->ops->set_parent(core->hw, p_index);
dfc202ea 1167
4dff95dc 1168 trace_clk_set_parent_complete(core, parent);
dfc202ea 1169
4dff95dc
SB
1170 if (ret) {
1171 flags = clk_enable_lock();
1172 clk_reparent(core, old_parent);
1173 clk_enable_unlock(flags);
dfc202ea 1174
4dff95dc 1175 if (core->prepare_count) {
d2a5d46b 1176 flags = clk_enable_lock();
4dff95dc
SB
1177 clk_core_disable(core);
1178 clk_core_disable(parent);
d2a5d46b 1179 clk_enable_unlock(flags);
4dff95dc 1180 clk_core_unprepare(parent);
b2476490 1181 }
4dff95dc 1182 return ret;
b2476490
MT
1183 }
1184
4dff95dc
SB
1185 __clk_set_parent_after(core, parent, old_parent);
1186
b2476490
MT
1187 return 0;
1188}
1189
1190/**
4dff95dc
SB
1191 * __clk_speculate_rates
1192 * @core: first clk in the subtree
1193 * @parent_rate: the "future" rate of clk's parent
b2476490 1194 *
4dff95dc
SB
1195 * Walks the subtree of clks starting with clk, speculating rates as it
1196 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
1197 *
1198 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
1199 * pre-rate change notifications and returns early if no clks in the
1200 * subtree have subscribed to the notifications. Note that if a clk does not
1201 * implement the .recalc_rate callback then it is assumed that the clock will
1202 * take on the rate of its parent.
b2476490 1203 */
4dff95dc
SB
1204static int __clk_speculate_rates(struct clk_core *core,
1205 unsigned long parent_rate)
b2476490 1206{
4dff95dc
SB
1207 struct clk_core *child;
1208 unsigned long new_rate;
1209 int ret = NOTIFY_DONE;
b2476490 1210
4dff95dc 1211 lockdep_assert_held(&prepare_lock);
864e160a 1212
4dff95dc
SB
1213 new_rate = clk_recalc(core, parent_rate);
1214
1215 /* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
1216 if (core->notifier_count)
1217 ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);
1218
1219 if (ret & NOTIFY_STOP_MASK) {
1220 pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
1221 __func__, core->name, ret);
1222 goto out;
1223 }
1224
1225 hlist_for_each_entry(child, &core->children, child_node) {
1226 ret = __clk_speculate_rates(child, new_rate);
1227 if (ret & NOTIFY_STOP_MASK)
1228 break;
1229 }
b2476490 1230
4dff95dc 1231out:
b2476490
MT
1232 return ret;
1233}
b2476490 1234
4dff95dc
SB
1235static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
1236 struct clk_core *new_parent, u8 p_index)
b2476490 1237{
4dff95dc 1238 struct clk_core *child;
b2476490 1239
4dff95dc
SB
1240 core->new_rate = new_rate;
1241 core->new_parent = new_parent;
1242 core->new_parent_index = p_index;
1243 /* include clk in new parent's PRE_RATE_CHANGE notifications */
1244 core->new_child = NULL;
1245 if (new_parent && new_parent != core->parent)
1246 new_parent->new_child = core;
496eadf8 1247
4dff95dc
SB
1248 hlist_for_each_entry(child, &core->children, child_node) {
1249 child->new_rate = clk_recalc(child, new_rate);
1250 clk_calc_subtree(child, child->new_rate, NULL, 0);
1251 }
1252}
b2476490 1253
4dff95dc
SB
1254/*
1255 * calculate the new rates returning the topmost clock that has to be
1256 * changed.
1257 */
1258static struct clk_core *clk_calc_new_rates(struct clk_core *core,
1259 unsigned long rate)
1260{
1261 struct clk_core *top = core;
1262 struct clk_core *old_parent, *parent;
4dff95dc
SB
1263 unsigned long best_parent_rate = 0;
1264 unsigned long new_rate;
1265 unsigned long min_rate;
1266 unsigned long max_rate;
1267 int p_index = 0;
1268 long ret;
1269
1270 /* sanity */
1271 if (IS_ERR_OR_NULL(core))
1272 return NULL;
1273
1274 /* save parent rate, if it exists */
1275 parent = old_parent = core->parent;
71472c0c 1276 if (parent)
4dff95dc 1277 best_parent_rate = parent->rate;
71472c0c 1278
4dff95dc
SB
1279 clk_core_get_boundaries(core, &min_rate, &max_rate);
1280
1281 /* find the closest rate and parent clk/rate */
d6968fca 1282 if (core->ops->determine_rate) {
0817b62c
BB
1283 struct clk_rate_request req;
1284
1285 req.rate = rate;
1286 req.min_rate = min_rate;
1287 req.max_rate = max_rate;
1288 if (parent) {
1289 req.best_parent_hw = parent->hw;
1290 req.best_parent_rate = parent->rate;
1291 } else {
1292 req.best_parent_hw = NULL;
1293 req.best_parent_rate = 0;
1294 }
1295
1296 ret = core->ops->determine_rate(core->hw, &req);
4dff95dc
SB
1297 if (ret < 0)
1298 return NULL;
1c8e6004 1299
0817b62c
BB
1300 best_parent_rate = req.best_parent_rate;
1301 new_rate = req.rate;
1302 parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;
4dff95dc
SB
1303 } else if (core->ops->round_rate) {
1304 ret = core->ops->round_rate(core->hw, rate,
0817b62c 1305 &best_parent_rate);
4dff95dc
SB
1306 if (ret < 0)
1307 return NULL;
035a61c3 1308
4dff95dc
SB
1309 new_rate = ret;
1310 if (new_rate < min_rate || new_rate > max_rate)
1311 return NULL;
1312 } else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
1313 /* pass-through clock without adjustable parent */
1314 core->new_rate = core->rate;
1315 return NULL;
1316 } else {
1317 /* pass-through clock with adjustable parent */
1318 top = clk_calc_new_rates(parent, rate);
1319 new_rate = parent->new_rate;
1320 goto out;
1321 }
1c8e6004 1322
4dff95dc
SB
1323 /* some clocks must be gated to change parent */
1324 if (parent != old_parent &&
1325 (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
1326 pr_debug("%s: %s not gated but wants to reparent\n",
1327 __func__, core->name);
1328 return NULL;
1329 }
b2476490 1330
4dff95dc
SB
1331 /* try finding the new parent index */
1332 if (parent && core->num_parents > 1) {
1333 p_index = clk_fetch_parent_index(core, parent);
1334 if (p_index < 0) {
1335 pr_debug("%s: clk %s can not be parent of clk %s\n",
1336 __func__, parent->name, core->name);
1337 return NULL;
1338 }
1339 }
b2476490 1340
4dff95dc
SB
1341 if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
1342 best_parent_rate != parent->rate)
1343 top = clk_calc_new_rates(parent, best_parent_rate);
035a61c3 1344
4dff95dc
SB
1345out:
1346 clk_calc_subtree(core, new_rate, parent, p_index);
b2476490 1347
4dff95dc 1348 return top;
b2476490 1349}
b2476490 1350
4dff95dc
SB
1351/*
1352 * Notify about rate changes in a subtree. Always walk down the whole tree
1353 * so that in case of an error we can walk down the whole tree again and
1354 * abort the change.
b2476490 1355 */
4dff95dc
SB
1356static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
1357 unsigned long event)
b2476490 1358{
4dff95dc 1359 struct clk_core *child, *tmp_clk, *fail_clk = NULL;
b2476490
MT
1360 int ret = NOTIFY_DONE;
1361
4dff95dc
SB
1362 if (core->rate == core->new_rate)
1363 return NULL;
b2476490 1364
4dff95dc
SB
1365 if (core->notifier_count) {
1366 ret = __clk_notify(core, event, core->rate, core->new_rate);
1367 if (ret & NOTIFY_STOP_MASK)
1368 fail_clk = core;
b2476490
MT
1369 }
1370
4dff95dc
SB
1371 hlist_for_each_entry(child, &core->children, child_node) {
1372 /* Skip children who will be reparented to another clock */
1373 if (child->new_parent && child->new_parent != core)
1374 continue;
1375 tmp_clk = clk_propagate_rate_change(child, event);
1376 if (tmp_clk)
1377 fail_clk = tmp_clk;
1378 }
5279fc40 1379
4dff95dc
SB
1380 /* handle the new child who might not be in core->children yet */
1381 if (core->new_child) {
1382 tmp_clk = clk_propagate_rate_change(core->new_child, event);
1383 if (tmp_clk)
1384 fail_clk = tmp_clk;
1385 }
5279fc40 1386
4dff95dc 1387 return fail_clk;
5279fc40
BB
1388}
1389
4dff95dc
SB
1390/*
1391 * walk down a subtree and set the new rates notifying the rate
1392 * change on the way
1393 */
1394static void clk_change_rate(struct clk_core *core)
035a61c3 1395{
4dff95dc
SB
1396 struct clk_core *child;
1397 struct hlist_node *tmp;
1398 unsigned long old_rate;
1399 unsigned long best_parent_rate = 0;
1400 bool skip_set_rate = false;
1401 struct clk_core *old_parent;
035a61c3 1402
4dff95dc 1403 old_rate = core->rate;
035a61c3 1404
4dff95dc
SB
1405 if (core->new_parent)
1406 best_parent_rate = core->new_parent->rate;
1407 else if (core->parent)
1408 best_parent_rate = core->parent->rate;
035a61c3 1409
4dff95dc
SB
1410 if (core->new_parent && core->new_parent != core->parent) {
1411 old_parent = __clk_set_parent_before(core, core->new_parent);
1412 trace_clk_set_parent(core, core->new_parent);
5279fc40 1413
4dff95dc
SB
1414 if (core->ops->set_rate_and_parent) {
1415 skip_set_rate = true;
1416 core->ops->set_rate_and_parent(core->hw, core->new_rate,
1417 best_parent_rate,
1418 core->new_parent_index);
1419 } else if (core->ops->set_parent) {
1420 core->ops->set_parent(core->hw, core->new_parent_index);
1421 }
5279fc40 1422
4dff95dc
SB
1423 trace_clk_set_parent_complete(core, core->new_parent);
1424 __clk_set_parent_after(core, core->new_parent, old_parent);
1425 }
8f2c2db1 1426
4dff95dc 1427 trace_clk_set_rate(core, core->new_rate);
b2476490 1428
4dff95dc
SB
1429 if (!skip_set_rate && core->ops->set_rate)
1430 core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);
496eadf8 1431
4dff95dc 1432 trace_clk_set_rate_complete(core, core->new_rate);
b2476490 1433
4dff95dc 1434 core->rate = clk_recalc(core, best_parent_rate);
b2476490 1435
4dff95dc
SB
1436 if (core->notifier_count && old_rate != core->rate)
1437 __clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);
b2476490 1438
85e88fab
MT
1439 if (core->flags & CLK_RECALC_NEW_RATES)
1440 (void)clk_calc_new_rates(core, core->new_rate);
d8d91987 1441
b2476490 1442 /*
4dff95dc
SB
1443 * Use safe iteration, as change_rate can actually swap parents
1444 * for certain clock types.
b2476490 1445 */
4dff95dc
SB
1446 hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
1447 /* Skip children who will be reparented to another clock */
1448 if (child->new_parent && child->new_parent != core)
1449 continue;
1450 clk_change_rate(child);
1451 }
b2476490 1452
4dff95dc
SB
1453 /* handle the new child who might not be in core->children yet */
1454 if (core->new_child)
1455 clk_change_rate(core->new_child);
b2476490
MT
1456}
1457
4dff95dc
SB
1458static int clk_core_set_rate_nolock(struct clk_core *core,
1459 unsigned long req_rate)
a093bde2 1460{
4dff95dc
SB
1461 struct clk_core *top, *fail_clk;
1462 unsigned long rate = req_rate;
1463 int ret = 0;
a093bde2 1464
4dff95dc
SB
1465 if (!core)
1466 return 0;
a093bde2 1467
4dff95dc
SB
1468 /* bail early if nothing to do */
1469 if (rate == clk_core_get_rate_nolock(core))
1470 return 0;
a093bde2 1471
4dff95dc
SB
1472 if ((core->flags & CLK_SET_RATE_GATE) && core->prepare_count)
1473 return -EBUSY;
a093bde2 1474
4dff95dc
SB
1475 /* calculate new rates and get the topmost changed clock */
1476 top = clk_calc_new_rates(core, rate);
1477 if (!top)
1478 return -EINVAL;
1479
1480 /* notify that we are about to change rates */
1481 fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
1482 if (fail_clk) {
1483 pr_debug("%s: failed to set %s rate\n", __func__,
1484 fail_clk->name);
1485 clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
1486 return -EBUSY;
1487 }
1488
1489 /* change the rates */
1490 clk_change_rate(top);
1491
1492 core->req_rate = req_rate;
1493
1494 return ret;
a093bde2 1495}
035a61c3
TV
1496
1497/**
4dff95dc
SB
1498 * clk_set_rate - specify a new rate for clk
1499 * @clk: the clk whose rate is being changed
1500 * @rate: the new rate for clk
035a61c3 1501 *
4dff95dc
SB
1502 * In the simplest case clk_set_rate will only adjust the rate of clk.
1503 *
1504 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
1505 * propagate up to clk's parent; whether or not this happens depends on the
1506 * outcome of clk's .round_rate implementation. If *parent_rate is unchanged
1507 * after calling .round_rate then upstream parent propagation is ignored. If
1508 * *parent_rate comes back with a new rate for clk's parent then we propagate
1509 * up to clk's parent and set its rate. Upward propagation will continue
1510 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
1511 * .round_rate stops requesting changes to clk's parent_rate.
1512 *
1513 * Rate changes are accomplished via tree traversal that also recalculates the
1514 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
1515 *
1516 * Returns 0 on success, a negative errno otherwise.
035a61c3 1517 */
4dff95dc 1518int clk_set_rate(struct clk *clk, unsigned long rate)
035a61c3 1519{
4dff95dc
SB
1520 int ret;
1521
035a61c3
TV
1522 if (!clk)
1523 return 0;
1524
4dff95dc
SB
1525 /* prevent racing with updates to the clock topology */
1526 clk_prepare_lock();
da0f0b2c 1527
4dff95dc 1528 ret = clk_core_set_rate_nolock(clk->core, rate);
da0f0b2c 1529
4dff95dc 1530 clk_prepare_unlock();
4935b22c 1531
4dff95dc 1532 return ret;
4935b22c 1533}
4dff95dc 1534EXPORT_SYMBOL_GPL(clk_set_rate);
4935b22c 1535
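/*
 * Illustrative walk-through of the propagation rules documented above
 * (not from the original source), assuming a divider "div" with
 * CLK_SET_RATE_PARENT set below a PLL "pll":
 *
 *	clk_set_rate(div, 100000000)
 *	  -> div's .round_rate() rewrites *parent_rate to 400000000
 *	  -> the parent rate changed, so the request propagates to pll
 *	  -> pll is retuned to 400 MHz and div programs a /4 divider
 *
 * Had .round_rate() left *parent_rate untouched, the change would
 * have stopped at div itself.
 */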
4dff95dc
SB
1536/**
1537 * clk_set_rate_range - set a rate range for a clock source
1538 * @clk: clock source
1539 * @min: desired minimum clock rate in Hz, inclusive
1540 * @max: desired maximum clock rate in Hz, inclusive
1541 *
1542 * Returns success (0) or negative errno.
1543 */
1544int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
4935b22c 1545{
4dff95dc 1546 int ret = 0;
4935b22c 1547
4dff95dc
SB
1548 if (!clk)
1549 return 0;
903efc55 1550
4dff95dc
SB
1551 if (min > max) {
1552 pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
1553 __func__, clk->core->name, clk->dev_id, clk->con_id,
1554 min, max);
1555 return -EINVAL;
903efc55 1556 }
4935b22c 1557
4dff95dc 1558 clk_prepare_lock();
4935b22c 1559
4dff95dc
SB
1560 if (min != clk->min_rate || max != clk->max_rate) {
1561 clk->min_rate = min;
1562 clk->max_rate = max;
1563 ret = clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
4935b22c
JH
1564 }
1565
4dff95dc 1566 clk_prepare_unlock();
4935b22c 1567
4dff95dc 1568 return ret;
3fa2252b 1569}
4dff95dc 1570EXPORT_SYMBOL_GPL(clk_set_rate_range);
3fa2252b 1571
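/*
 * Illustrative consumer sketch (not from the original source): pin a
 * clock into a band and let the core re-solve the rate against the
 * combined constraints of every user. The bounds are hypothetical.
 */
static int example_constrain_clk(struct clk *clk)
{
	return clk_set_rate_range(clk, 100000000, 200000000);
}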
4dff95dc
SB
1572/**
1573 * clk_set_min_rate - set a minimum clock rate for a clock source
1574 * @clk: clock source
1575 * @rate: desired minimum clock rate in Hz, inclusive
1576 *
1577 * Returns success (0) or negative errno.
1578 */
1579int clk_set_min_rate(struct clk *clk, unsigned long rate)
3fa2252b 1580{
4dff95dc
SB
1581 if (!clk)
1582 return 0;
1583
1584 return clk_set_rate_range(clk, rate, clk->max_rate);
3fa2252b 1585}
4dff95dc 1586EXPORT_SYMBOL_GPL(clk_set_min_rate);
3fa2252b 1587
4dff95dc
SB
1588/**
1589 * clk_set_max_rate - set a maximum clock rate for a clock source
1590 * @clk: clock source
1591 * @rate: desired maximum clock rate in Hz, inclusive
1592 *
1593 * Returns success (0) or negative errno.
1594 */
1595int clk_set_max_rate(struct clk *clk, unsigned long rate)
3fa2252b 1596{
4dff95dc
SB
1597 if (!clk)
1598 return 0;
4935b22c 1599
4dff95dc 1600 return clk_set_rate_range(clk, clk->min_rate, rate);
4935b22c 1601}
4dff95dc 1602EXPORT_SYMBOL_GPL(clk_set_max_rate);
4935b22c 1603
b2476490 1604/**
4dff95dc
SB
1605 * clk_get_parent - return the parent of a clk
1606 * @clk: the clk whose parent gets returned
b2476490 1607 *
4dff95dc 1608 * Simply returns clk->parent. Returns NULL if clk is NULL.
b2476490 1609 */
4dff95dc 1610struct clk *clk_get_parent(struct clk *clk)
b2476490 1611{
4dff95dc 1612 struct clk *parent;
b2476490 1613
4dff95dc
SB
1614 clk_prepare_lock();
1615 parent = __clk_get_parent(clk);
1616 clk_prepare_unlock();
496eadf8 1617
4dff95dc
SB
1618 return parent;
1619}
1620EXPORT_SYMBOL_GPL(clk_get_parent);
b2476490 1621
4dff95dc
SB
1622/*
1623 * .get_parent is mandatory for clocks with multiple possible parents. It is
1624 * optional for single-parent clocks. Always call .get_parent if it is
1625 * available and WARN if it is missing for multi-parent clocks.
1626 *
1627 * For single-parent clocks without .get_parent, first check to see if the
1628 * .parents array exists, and if so use it to avoid an expensive tree
1629 * traversal. If .parents does not exist then walk the tree.
1630 */
1631static struct clk_core *__clk_init_parent(struct clk_core *core)
1632{
1633 struct clk_core *ret = NULL;
1634 u8 index;
b2476490 1635
4dff95dc
SB
1636 /* handle the trivial cases */
1637
1638 if (!core->num_parents)
b2476490
MT
1639 goto out;
1640
4dff95dc
SB
1641 if (core->num_parents == 1) {
1642 if (IS_ERR_OR_NULL(core->parent))
1643 core->parent = clk_core_lookup(core->parent_names[0]);
1644 ret = core->parent;
1645 goto out;
b2476490
MT
1646 }
1647
4dff95dc
SB
1648 if (!core->ops->get_parent) {
1649 WARN(!core->ops->get_parent,
1650 "%s: multi-parent clocks must implement .get_parent\n",
1651 __func__);
1652 goto out;
1653 }
1654
1655 /*
1656 * Do our best to cache parent clocks in core->parents. This prevents
1657 * unnecessary and expensive lookups. We don't set core->parent here;
1658 * that is done by the calling function.
1659 */
1660
1661 index = core->ops->get_parent(core->hw);
1662
1663 if (!core->parents)
1664 core->parents =
1665 kcalloc(core->num_parents, sizeof(struct clk *),
1666 GFP_KERNEL);
1667
1668 ret = clk_core_get_parent_by_index(core, index);
1669
b2476490
MT
1670out:
1671 return ret;
1672}
1673
4dff95dc
SB
1674static void clk_core_reparent(struct clk_core *core,
1675 struct clk_core *new_parent)
b2476490 1676{
4dff95dc
SB
1677 clk_reparent(core, new_parent);
1678 __clk_recalc_accuracies(core);
1679 __clk_recalc_rates(core, POST_RATE_CHANGE);
b2476490
MT
1680}
1681
42c86547
TV
1682void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
1683{
1684 if (!hw)
1685 return;
1686
1687 clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core);
1688}
1689
4dff95dc
SB
1690/**
1691 * clk_has_parent - check if a clock is a possible parent for another
1692 * @clk: clock source
1693 * @parent: parent clock source
1694 *
1695 * This function can be used in drivers that need to check that a clock can be
1696 * the parent of another without actually changing the parent.
1697 *
1698 * Returns true if @parent is a possible parent for @clk, false otherwise.
b2476490 1699 */
4dff95dc 1700bool clk_has_parent(struct clk *clk, struct clk *parent)
b2476490 1701{
4dff95dc
SB
1702 struct clk_core *core, *parent_core;
1703 unsigned int i;
b2476490 1704
4dff95dc
SB
1705 /* NULL clocks should be nops, so return success if either is NULL. */
1706 if (!clk || !parent)
1707 return true;
7452b219 1708
4dff95dc
SB
1709 core = clk->core;
1710 parent_core = parent->core;
71472c0c 1711
4dff95dc
SB
1712 /* Optimize for the case where the parent is already the parent. */
1713 if (core->parent == parent_core)
1714 return true;
1c8e6004 1715
4dff95dc
SB
1716 for (i = 0; i < core->num_parents; i++)
1717 if (strcmp(core->parent_names[i], parent_core->name) == 0)
1718 return true;
03bc10ab 1719
4dff95dc
SB
1720 return false;
1721}
1722EXPORT_SYMBOL_GPL(clk_has_parent);
03bc10ab 1723
4dff95dc
SB
1724static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent)
1725{
1726 int ret = 0;
1727 int p_index = 0;
1728 unsigned long p_rate = 0;
1729
1730 if (!core)
1731 return 0;
1732
1733 /* prevent racing with updates to the clock topology */
1734 clk_prepare_lock();
1735
1736 if (core->parent == parent)
1737 goto out;
1738
1739 /* verify ops for multi-parent clks */
1740 if ((core->num_parents > 1) && (!core->ops->set_parent)) {
1741 ret = -ENOSYS;
63f5c3b2 1742 goto out;
7452b219
MT
1743 }
1744
4dff95dc
SB
1745 /* check that we are allowed to re-parent if the clock is in use */
1746 if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
1747 ret = -EBUSY;
1748 goto out;
b2476490
MT
1749 }
1750
71472c0c 1751 /* try finding the new parent index */
4dff95dc 1752 if (parent) {
d6968fca 1753 p_index = clk_fetch_parent_index(core, parent);
4dff95dc 1754 p_rate = parent->rate;
f1c8b2ed 1755 if (p_index < 0) {
71472c0c 1756 pr_debug("%s: clk %s can not be parent of clk %s\n",
4dff95dc
SB
1757 __func__, parent->name, core->name);
1758 ret = p_index;
1759 goto out;
71472c0c 1760 }
b2476490
MT
1761 }
1762
4dff95dc
SB
1763 /* propagate PRE_RATE_CHANGE notifications */
1764 ret = __clk_speculate_rates(core, p_rate);
b2476490 1765
4dff95dc
SB
1766 /* abort if a driver objects */
1767 if (ret & NOTIFY_STOP_MASK)
1768 goto out;
b2476490 1769
4dff95dc
SB
1770 /* do the re-parent */
1771 ret = __clk_set_parent(core, parent, p_index);
b2476490 1772
4dff95dc
SB
1773 /* propagate rate and accuracy recalculation accordingly */
1774 if (ret) {
1775 __clk_recalc_rates(core, ABORT_RATE_CHANGE);
1776 } else {
1777 __clk_recalc_rates(core, POST_RATE_CHANGE);
1778 __clk_recalc_accuracies(core);
b2476490
MT
1779 }
1780
4dff95dc
SB
1781out:
1782 clk_prepare_unlock();
71472c0c 1783
4dff95dc
SB
1784 return ret;
1785}
b2476490 1786
4dff95dc
SB
1787/**
1788 * clk_set_parent - switch the parent of a mux clk
1789 * @clk: the mux clk whose input we are switching
1790 * @parent: the new input to clk
1791 *
1792 * Re-parent clk to use parent as its new input source. If clk is in
1793 * prepared state, the clk will get enabled for the duration of this call. If
1794 * that's not acceptable for a specific clk (e.g. the consumer can't handle
1795 * that, or the reparenting is glitchy in hardware), use the
1796 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
1797 *
1798 * After successfully changing clk's parent clk_set_parent will update the
1799 * clk topology, sysfs topology and propagate rate recalculation via
1800 * __clk_recalc_rates.
1801 *
 1802 * Returns 0 on success, a negative error code otherwise.
1803 */
1804int clk_set_parent(struct clk *clk, struct clk *parent)
1805{
1806 if (!clk)
1807 return 0;
1808
1809 return clk_core_set_parent(clk->core, parent ? parent->core : NULL);
b2476490 1810}
4dff95dc 1811EXPORT_SYMBOL_GPL(clk_set_parent);
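/*
 * Example (editor's sketch, not part of the original file): re-parenting
 * a mux that carries the CLK_SET_PARENT_GATE flag, which makes
 * clk_core_set_parent() above return -EBUSY while the clock is prepared.
 * "mux" and "osc" are hypothetical handles obtained via clk_get().
 *
 *	clk_disable_unprepare(mux);
 *	ret = clk_set_parent(mux, osc);
 *	if (!ret)
 *		ret = clk_prepare_enable(mux);
 */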
b2476490 1812
4dff95dc
SB
1813/**
1814 * clk_set_phase - adjust the phase shift of a clock signal
1815 * @clk: clock signal source
1816 * @degrees: number of degrees the signal is shifted
1817 *
1818 * Shifts the phase of a clock signal by the specified
 1819 * degrees. Returns 0 on success, a negative error code otherwise.
1820 *
1821 * This function makes no distinction about the input or reference
 1822 * signal that we adjust the clock signal phase against. For example,
 1823 * for phase locked-loop clock signal generators we may shift phase with
1824 * respect to feedback clock signal input, but for other cases the
1825 * clock phase may be shifted with respect to some other, unspecified
1826 * signal.
1827 *
1828 * Additionally the concept of phase shift does not propagate through
1829 * the clock tree hierarchy, which sets it apart from clock rates and
1830 * clock accuracy. A parent clock phase attribute does not have an
1831 * impact on the phase attribute of a child clock.
b2476490 1832 */
4dff95dc 1833int clk_set_phase(struct clk *clk, int degrees)
b2476490 1834{
4dff95dc 1835 int ret = -EINVAL;
b2476490 1836
4dff95dc
SB
1837 if (!clk)
1838 return 0;
b2476490 1839
4dff95dc
SB
1840 /* sanity check degrees */
1841 degrees %= 360;
1842 if (degrees < 0)
1843 degrees += 360;
bf47b4fd 1844
4dff95dc 1845 clk_prepare_lock();
3fa2252b 1846
4dff95dc 1847 trace_clk_set_phase(clk->core, degrees);
3fa2252b 1848
4dff95dc
SB
1849 if (clk->core->ops->set_phase)
1850 ret = clk->core->ops->set_phase(clk->core->hw, degrees);
3fa2252b 1851
4dff95dc 1852 trace_clk_set_phase_complete(clk->core, degrees);
dfc202ea 1853
4dff95dc
SB
1854 if (!ret)
1855 clk->core->phase = degrees;
b2476490 1856
4dff95dc 1857 clk_prepare_unlock();
dfc202ea 1858
4dff95dc
SB
1859 return ret;
1860}
1861EXPORT_SYMBOL_GPL(clk_set_phase);
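/*
 * Example (editor's sketch): shifting a hypothetical sampling clock by a
 * quarter period and reading the value back with clk_get_phase(). Inputs
 * outside [0, 359] are normalized above, so -270 behaves like 90.
 *
 *	ret = clk_set_phase(sample_clk, 90);
 *	if (!ret)
 *		WARN_ON(clk_get_phase(sample_clk) != 90);
 */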
b2476490 1862
4dff95dc
SB
1863static int clk_core_get_phase(struct clk_core *core)
1864{
1865 int ret;
b2476490 1866
4dff95dc
SB
1867 clk_prepare_lock();
1868 ret = core->phase;
1869 clk_prepare_unlock();
71472c0c 1870
4dff95dc 1871 return ret;
b2476490
MT
1872}
1873
4dff95dc
SB
1874/**
1875 * clk_get_phase - return the phase shift of a clock signal
1876 * @clk: clock signal source
1877 *
1878 * Returns the phase shift of a clock node in degrees, otherwise returns
 1879 * a negative error code.
1880 */
1881int clk_get_phase(struct clk *clk)
1c8e6004 1882{
4dff95dc 1883 if (!clk)
1c8e6004
TV
1884 return 0;
1885
4dff95dc
SB
1886 return clk_core_get_phase(clk->core);
1887}
1888EXPORT_SYMBOL_GPL(clk_get_phase);
1c8e6004 1889
4dff95dc
SB
1890/**
1891 * clk_is_match - check if two clk's point to the same hardware clock
1892 * @p: clk compared against q
1893 * @q: clk compared against p
1894 *
1895 * Returns true if the two struct clk pointers both point to the same hardware
1896 * clock node. Put differently, returns true if struct clk *p and struct clk *q
1897 * share the same struct clk_core object.
1898 *
1899 * Returns false otherwise. Note that two NULL clks are treated as matching.
1900 */
1901bool clk_is_match(const struct clk *p, const struct clk *q)
1902{
1903 /* trivial case: identical struct clk's or both NULL */
1904 if (p == q)
1905 return true;
1c8e6004 1906
4dff95dc
SB
 1907 /* true if clk->core pointers match. Avoid dereferencing garbage */
1908 if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
1909 if (p->core == q->core)
1910 return true;
1c8e6004 1911
4dff95dc
SB
1912 return false;
1913}
1914EXPORT_SYMBOL_GPL(clk_is_match);
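/*
 * Example (editor's sketch): two clk_get() calls yield distinct per-user
 * struct clk handles (see __clk_create_clk below), so pointer equality
 * is not a valid test; clk_is_match() compares the underlying clk_core
 * instead. "bus" is an assumed connection id.
 *
 *	struct clk *a = clk_get(dev, "bus");
 *	struct clk *b = clk_get(dev, "bus");
 *
 *	WARN_ON(a == b);		(handles differ)
 *	WARN_ON(!clk_is_match(a, b));	(same hardware clock)
 */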
1c8e6004 1915
4dff95dc 1916/*** debugfs support ***/
1c8e6004 1917
4dff95dc
SB
1918#ifdef CONFIG_DEBUG_FS
1919#include <linux/debugfs.h>
1c8e6004 1920
4dff95dc
SB
1921static struct dentry *rootdir;
1922static int inited = 0;
1923static DEFINE_MUTEX(clk_debug_lock);
1924static HLIST_HEAD(clk_debug_list);
1c8e6004 1925
4dff95dc
SB
1926static struct hlist_head *all_lists[] = {
1927 &clk_root_list,
1928 &clk_orphan_list,
1929 NULL,
1930};
1931
1932static struct hlist_head *orphan_list[] = {
1933 &clk_orphan_list,
1934 NULL,
1935};
1936
1937static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
1938 int level)
b2476490 1939{
4dff95dc
SB
1940 if (!c)
1941 return;
b2476490 1942
4dff95dc
SB
1943 seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n",
1944 level * 3 + 1, "",
1945 30 - level * 3, c->name,
1946 c->enable_count, c->prepare_count, clk_core_get_rate(c),
1947 clk_core_get_accuracy(c), clk_core_get_phase(c));
1948}
89ac8d7a 1949
4dff95dc
SB
1950static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
1951 int level)
1952{
1953 struct clk_core *child;
b2476490 1954
4dff95dc
SB
1955 if (!c)
1956 return;
b2476490 1957
4dff95dc 1958 clk_summary_show_one(s, c, level);
0e1c0301 1959
4dff95dc
SB
1960 hlist_for_each_entry(child, &c->children, child_node)
1961 clk_summary_show_subtree(s, child, level + 1);
1c8e6004 1962}
b2476490 1963
4dff95dc 1964static int clk_summary_show(struct seq_file *s, void *data)
1c8e6004 1965{
4dff95dc
SB
1966 struct clk_core *c;
1967 struct hlist_head **lists = (struct hlist_head **)s->private;
1c8e6004 1968
4dff95dc
SB
1969 seq_puts(s, " clock enable_cnt prepare_cnt rate accuracy phase\n");
1970 seq_puts(s, "----------------------------------------------------------------------------------------\n");
b2476490 1971
1c8e6004
TV
1972 clk_prepare_lock();
1973
4dff95dc
SB
1974 for (; *lists; lists++)
1975 hlist_for_each_entry(c, *lists, child_node)
1976 clk_summary_show_subtree(s, c, 0);
b2476490 1977
eab89f69 1978 clk_prepare_unlock();
b2476490 1979
4dff95dc 1980 return 0;
b2476490 1981}
1c8e6004 1982
1c8e6004 1983
4dff95dc 1984static int clk_summary_open(struct inode *inode, struct file *file)
1c8e6004 1985{
4dff95dc 1986 return single_open(file, clk_summary_show, inode->i_private);
1c8e6004 1987}
b2476490 1988
4dff95dc
SB
1989static const struct file_operations clk_summary_fops = {
1990 .open = clk_summary_open,
1991 .read = seq_read,
1992 .llseek = seq_lseek,
1993 .release = single_release,
1994};
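/*
 * Reading the resulting "clk_summary" file yields the tree rendered by
 * clk_summary_show_one() above, e.g. (abridged, illustrative names and
 * values only):
 *
 *	   clock         enable_cnt  prepare_cnt        rate   accuracy  phase
 *	----------------------------------------------------------------------
 *	 osc24m                   1            1    24000000          0    0
 *	    pll1                  1            1   792000000          0    0
 */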
b2476490 1995
4dff95dc
SB
1996static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
1997{
1998 if (!c)
1999 return;
b2476490 2000
7cb81136 2001 /* This should be JSON format, i.e. elements separated with a comma */
4dff95dc
SB
2002 seq_printf(s, "\"%s\": { ", c->name);
2003 seq_printf(s, "\"enable_count\": %d,", c->enable_count);
2004 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
7cb81136
SW
2005 seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
2006 seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
4dff95dc 2007 seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
b2476490 2008}
b2476490 2009
4dff95dc 2010static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
b2476490 2011{
4dff95dc 2012 struct clk_core *child;
b2476490 2013
4dff95dc
SB
2014 if (!c)
2015 return;
b2476490 2016
4dff95dc 2017 clk_dump_one(s, c, level);
b2476490 2018
4dff95dc
SB
2019 hlist_for_each_entry(child, &c->children, child_node) {
2020 seq_printf(s, ",");
2021 clk_dump_subtree(s, child, level + 1);
b2476490
MT
2022 }
2023
4dff95dc 2024 seq_printf(s, "}");
b2476490
MT
2025}
2026
4dff95dc 2027static int clk_dump(struct seq_file *s, void *data)
4e88f3de 2028{
4dff95dc
SB
2029 struct clk_core *c;
2030 bool first_node = true;
2031 struct hlist_head **lists = (struct hlist_head **)s->private;
4e88f3de 2032
4dff95dc 2033 seq_printf(s, "{");
4e88f3de 2034
4dff95dc 2035 clk_prepare_lock();
035a61c3 2036
4dff95dc
SB
2037 for (; *lists; lists++) {
2038 hlist_for_each_entry(c, *lists, child_node) {
2039 if (!first_node)
2040 seq_puts(s, ",");
2041 first_node = false;
2042 clk_dump_subtree(s, c, 0);
2043 }
2044 }
4e88f3de 2045
4dff95dc 2046 clk_prepare_unlock();
4e88f3de 2047
70e9f4dd 2048 seq_puts(s, "}\n");
4dff95dc 2049 return 0;
4e88f3de 2050}
4e88f3de 2051
4dff95dc
SB
2052
2053static int clk_dump_open(struct inode *inode, struct file *file)
b2476490 2054{
4dff95dc
SB
2055 return single_open(file, clk_dump, inode->i_private);
2056}
b2476490 2057
4dff95dc
SB
2058static const struct file_operations clk_dump_fops = {
2059 .open = clk_dump_open,
2060 .read = seq_read,
2061 .llseek = seq_lseek,
2062 .release = single_release,
2063};
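/*
 * Reading the resulting "clk_dump" file yields a single JSON object with
 * children nested inside their parent, e.g. (abridged, illustrative
 * names and values only):
 *
 *	{"osc24m": { "enable_count": 1,"prepare_count": 1,
 *	"rate": 24000000,"accuracy": 0,"phase": 0,
 *	"pll1": { ... }}}
 */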
89ac8d7a 2064
4dff95dc
SB
2065static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
2066{
2067 struct dentry *d;
2068 int ret = -ENOMEM;
b2476490 2069
4dff95dc
SB
2070 if (!core || !pdentry) {
2071 ret = -EINVAL;
b2476490 2072 goto out;
4dff95dc 2073 }
b2476490 2074
4dff95dc
SB
2075 d = debugfs_create_dir(core->name, pdentry);
2076 if (!d)
b61c43c0 2077 goto out;
b61c43c0 2078
4dff95dc
SB
2079 core->dentry = d;
2080
2081 d = debugfs_create_u32("clk_rate", S_IRUGO, core->dentry,
2082 (u32 *)&core->rate);
2083 if (!d)
2084 goto err_out;
2085
2086 d = debugfs_create_u32("clk_accuracy", S_IRUGO, core->dentry,
2087 (u32 *)&core->accuracy);
2088 if (!d)
2089 goto err_out;
2090
2091 d = debugfs_create_u32("clk_phase", S_IRUGO, core->dentry,
2092 (u32 *)&core->phase);
2093 if (!d)
2094 goto err_out;
031dcc9b 2095
4dff95dc
SB
2096 d = debugfs_create_x32("clk_flags", S_IRUGO, core->dentry,
2097 (u32 *)&core->flags);
2098 if (!d)
2099 goto err_out;
031dcc9b 2100
4dff95dc
SB
2101 d = debugfs_create_u32("clk_prepare_count", S_IRUGO, core->dentry,
2102 (u32 *)&core->prepare_count);
2103 if (!d)
2104 goto err_out;
b2476490 2105
4dff95dc
SB
2106 d = debugfs_create_u32("clk_enable_count", S_IRUGO, core->dentry,
2107 (u32 *)&core->enable_count);
2108 if (!d)
2109 goto err_out;
b2476490 2110
4dff95dc
SB
2111 d = debugfs_create_u32("clk_notifier_count", S_IRUGO, core->dentry,
2112 (u32 *)&core->notifier_count);
2113 if (!d)
2114 goto err_out;
b2476490 2115
4dff95dc
SB
2116 if (core->ops->debug_init) {
2117 ret = core->ops->debug_init(core->hw, core->dentry);
2118 if (ret)
2119 goto err_out;
5279fc40 2120 }
b2476490 2121
4dff95dc
SB
2122 ret = 0;
2123 goto out;
b2476490 2124
4dff95dc
SB
2125err_out:
2126 debugfs_remove_recursive(core->dentry);
2127 core->dentry = NULL;
2128out:
b2476490
MT
2129 return ret;
2130}
035a61c3
TV
2131
2132/**
6e5ab41b
SB
2133 * clk_debug_register - add a clk node to the debugfs clk directory
2134 * @core: the clk being added to the debugfs clk directory
035a61c3 2135 *
6e5ab41b
SB
2136 * Dynamically adds a clk to the debugfs clk directory if debugfs has been
2137 * initialized. Otherwise it bails out early since the debugfs clk directory
4dff95dc 2138 * will be created lazily by clk_debug_init as part of a late_initcall.
035a61c3 2139 */
4dff95dc 2140static int clk_debug_register(struct clk_core *core)
035a61c3 2141{
4dff95dc 2142 int ret = 0;
035a61c3 2143
4dff95dc
SB
2144 mutex_lock(&clk_debug_lock);
2145 hlist_add_head(&core->debug_node, &clk_debug_list);
2146
2147 if (!inited)
2148 goto unlock;
2149
2150 ret = clk_debug_create_one(core, rootdir);
2151unlock:
2152 mutex_unlock(&clk_debug_lock);
2153
2154 return ret;
035a61c3 2155}
b2476490 2156
4dff95dc 2157 /**
6e5ab41b
SB
2158 * clk_debug_unregister - remove a clk node from the debugfs clk directory
2159 * @core: the clk being removed from the debugfs clk directory
e59c5371 2160 *
6e5ab41b
SB
2161 * Dynamically removes a clk and all its child nodes from the
2162 * debugfs clk directory if clk->dentry points to debugfs created by
4dff95dc 2163 * clk_debug_register in __clk_init.
e59c5371 2164 */
4dff95dc 2165static void clk_debug_unregister(struct clk_core *core)
e59c5371 2166{
4dff95dc
SB
2167 mutex_lock(&clk_debug_lock);
2168 hlist_del_init(&core->debug_node);
2169 debugfs_remove_recursive(core->dentry);
2170 core->dentry = NULL;
2171 mutex_unlock(&clk_debug_lock);
2172}
e59c5371 2173
4dff95dc
SB
2174struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
2175 void *data, const struct file_operations *fops)
2176{
2177 struct dentry *d = NULL;
e59c5371 2178
4dff95dc
SB
2179 if (hw->core->dentry)
2180 d = debugfs_create_file(name, mode, hw->core->dentry, data,
2181 fops);
e59c5371 2182
4dff95dc
SB
2183 return d;
2184}
2185EXPORT_SYMBOL_GPL(clk_debugfs_add_file);
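/*
 * Example (editor's sketch): a provider exposing an extra per-clock
 * debugfs file from its .debug_init hook, which runs after the common
 * entries above have been created. "foo_regdump_fops" is an assumed
 * file_operations defined elsewhere.
 *
 *	static int foo_debug_init(struct clk_hw *hw, struct dentry *dentry)
 *	{
 *		struct dentry *d;
 *
 *		d = clk_debugfs_add_file(hw, "regdump", S_IRUGO, hw,
 *					 &foo_regdump_fops);
 *		return d ? 0 : -ENOMEM;
 *	}
 */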
e59c5371 2186
4dff95dc 2187/**
6e5ab41b 2188 * clk_debug_init - lazily populate the debugfs clk directory
4dff95dc 2189 *
6e5ab41b
SB
2190 * clks are often initialized very early during boot before memory can be
2191 * dynamically allocated and well before debugfs is setup. This function
2192 * populates the debugfs clk directory once at boot-time when we know that
2193 * debugfs is setup. It should only be called once at boot-time, all other clks
2194 * added dynamically will be done so with clk_debug_register.
4dff95dc
SB
2195 */
2196static int __init clk_debug_init(void)
2197{
2198 struct clk_core *core;
2199 struct dentry *d;
dfc202ea 2200
4dff95dc 2201 rootdir = debugfs_create_dir("clk", NULL);
e59c5371 2202
4dff95dc
SB
2203 if (!rootdir)
2204 return -ENOMEM;
dfc202ea 2205
4dff95dc
SB
2206 d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, &all_lists,
2207 &clk_summary_fops);
2208 if (!d)
2209 return -ENOMEM;
e59c5371 2210
4dff95dc
SB
2211 d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, &all_lists,
2212 &clk_dump_fops);
2213 if (!d)
2214 return -ENOMEM;
e59c5371 2215
4dff95dc
SB
2216 d = debugfs_create_file("clk_orphan_summary", S_IRUGO, rootdir,
2217 &orphan_list, &clk_summary_fops);
2218 if (!d)
2219 return -ENOMEM;
e59c5371 2220
4dff95dc
SB
2221 d = debugfs_create_file("clk_orphan_dump", S_IRUGO, rootdir,
2222 &orphan_list, &clk_dump_fops);
2223 if (!d)
2224 return -ENOMEM;
e59c5371 2225
4dff95dc
SB
2226 mutex_lock(&clk_debug_lock);
2227 hlist_for_each_entry(core, &clk_debug_list, debug_node)
2228 clk_debug_create_one(core, rootdir);
e59c5371 2229
4dff95dc
SB
2230 inited = 1;
2231 mutex_unlock(&clk_debug_lock);
e59c5371 2232
4dff95dc
SB
2233 return 0;
2234}
2235late_initcall(clk_debug_init);
2236#else
2237static inline int clk_debug_register(struct clk_core *core) { return 0; }
2238static inline void clk_debug_reparent(struct clk_core *core,
2239 struct clk_core *new_parent)
035a61c3 2240{
035a61c3 2241}
4dff95dc 2242static inline void clk_debug_unregister(struct clk_core *core)
3d3801ef 2243{
3d3801ef 2244}
4dff95dc 2245#endif
3d3801ef 2246
b2476490
MT
2247/**
2248 * __clk_init - initialize the data structures in a struct clk
2249 * @dev: device initializing this clk, placeholder for now
2250 * @clk: clk being initialized
2251 *
035a61c3 2252 * Initializes the lists in struct clk_core, queries the hardware for the
b2476490 2253 * parent and rate and sets them both.
b2476490 2254 */
b09d6d99 2255static int __clk_init(struct device *dev, struct clk *clk_user)
b2476490 2256{
d1302a36 2257 int i, ret = 0;
035a61c3 2258 struct clk_core *orphan;
b67bfe0d 2259 struct hlist_node *tmp2;
d6968fca 2260 struct clk_core *core;
1c8e6004 2261 unsigned long rate;
b2476490 2262
035a61c3 2263 if (!clk_user)
d1302a36 2264 return -EINVAL;
b2476490 2265
d6968fca 2266 core = clk_user->core;
035a61c3 2267
eab89f69 2268 clk_prepare_lock();
b2476490
MT
2269
2270 /* check to see if a clock with this name is already registered */
d6968fca 2271 if (clk_core_lookup(core->name)) {
d1302a36 2272 pr_debug("%s: clk %s already initialized\n",
d6968fca 2273 __func__, core->name);
d1302a36 2274 ret = -EEXIST;
b2476490 2275 goto out;
d1302a36 2276 }
b2476490 2277
d4d7e3dd 2278 /* check that clk_ops are sane. See Documentation/clk.txt */
d6968fca
SB
2279 if (core->ops->set_rate &&
2280 !((core->ops->round_rate || core->ops->determine_rate) &&
2281 core->ops->recalc_rate)) {
71472c0c 2282 pr_warning("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
d6968fca 2283 __func__, core->name);
d1302a36 2284 ret = -EINVAL;
d4d7e3dd
MT
2285 goto out;
2286 }
2287
d6968fca 2288 if (core->ops->set_parent && !core->ops->get_parent) {
d4d7e3dd 2289 pr_warning("%s: %s must implement .get_parent & .set_parent\n",
d6968fca 2290 __func__, core->name);
d1302a36 2291 ret = -EINVAL;
d4d7e3dd
MT
2292 goto out;
2293 }
2294
d6968fca
SB
2295 if (core->ops->set_rate_and_parent &&
2296 !(core->ops->set_parent && core->ops->set_rate)) {
3fa2252b 2297 pr_warn("%s: %s must implement .set_parent & .set_rate\n",
d6968fca 2298 __func__, core->name);
3fa2252b
SB
2299 ret = -EINVAL;
2300 goto out;
2301 }
2302
b2476490 2303 /* throw a WARN if any entries in parent_names are NULL */
d6968fca
SB
2304 for (i = 0; i < core->num_parents; i++)
2305 WARN(!core->parent_names[i],
b2476490 2306 "%s: invalid NULL in %s's .parent_names\n",
d6968fca 2307 __func__, core->name);
b2476490
MT
2308
2309 /*
2310 * Allocate an array of struct clk *'s to avoid unnecessary string
2311 * look-ups of clk's possible parents. This can fail for clocks passed
d6968fca 2312 * in to clk_init during early boot; thus any access to core->parents[]
b2476490
MT
2313 * must always check for a NULL pointer and try to populate it if
2314 * necessary.
2315 *
d6968fca
SB
2316 * If core->parents is not NULL we skip this entire block. This allows
2317 * for clock drivers to statically initialize core->parents.
b2476490 2318 */
d6968fca
SB
2319 if (core->num_parents > 1 && !core->parents) {
2320 core->parents = kcalloc(core->num_parents, sizeof(struct clk *),
96a7ed90 2321 GFP_KERNEL);
b2476490 2322 /*
035a61c3 2323 * clk_core_lookup returns NULL for parents that have not been
b2476490
MT
2324 * clk_init'd; thus any access to clk->parents[] must check
2325 * for a NULL pointer. We can always perform lazy lookups for
2326 * missing parents later on.
2327 */
d6968fca
SB
2328 if (core->parents)
2329 for (i = 0; i < core->num_parents; i++)
2330 core->parents[i] =
2331 clk_core_lookup(core->parent_names[i]);
b2476490
MT
2332 }
2333
d6968fca 2334 core->parent = __clk_init_parent(core);
b2476490
MT
2335
2336 /*
d6968fca 2337 * Populate core->parent if parent has already been __clk_init'd. If
b2476490
MT
2338 * parent has not yet been __clk_init'd then place clk in the orphan
2339 * list. If clk has set the CLK_IS_ROOT flag then place it in the root
2340 * clk list.
2341 *
2342 * Every time a new clk is clk_init'd then we walk the list of orphan
2343 * clocks and re-parent any that are children of the clock currently
2344 * being clk_init'd.
2345 */
d6968fca
SB
2346 if (core->parent)
2347 hlist_add_head(&core->child_node,
2348 &core->parent->children);
2349 else if (core->flags & CLK_IS_ROOT)
2350 hlist_add_head(&core->child_node, &clk_root_list);
b2476490 2351 else
d6968fca 2352 hlist_add_head(&core->child_node, &clk_orphan_list);
b2476490 2353
5279fc40
BB
2354 /*
2355 * Set clk's accuracy. The preferred method is to use
2356 * .recalc_accuracy. For simple clocks and lazy developers the default
2357 * fallback is to use the parent's accuracy. If a clock doesn't have a
2358 * parent (or is orphaned) then accuracy is set to zero (perfect
2359 * clock).
2360 */
d6968fca
SB
2361 if (core->ops->recalc_accuracy)
2362 core->accuracy = core->ops->recalc_accuracy(core->hw,
2363 __clk_get_accuracy(core->parent));
2364 else if (core->parent)
2365 core->accuracy = core->parent->accuracy;
5279fc40 2366 else
d6968fca 2367 core->accuracy = 0;
5279fc40 2368
9824cf73
MR
2369 /*
2370 * Set clk's phase.
2371 * Since a phase is by definition relative to its parent, just
2372 * query the current clock phase, or just assume it's in phase.
2373 */
d6968fca
SB
2374 if (core->ops->get_phase)
2375 core->phase = core->ops->get_phase(core->hw);
9824cf73 2376 else
d6968fca 2377 core->phase = 0;
9824cf73 2378
b2476490
MT
2379 /*
2380 * Set clk's rate. The preferred method is to use .recalc_rate. For
2381 * simple clocks and lazy developers the default fallback is to use the
2382 * parent's rate. If a clock doesn't have a parent (or is orphaned)
2383 * then rate is set to zero.
2384 */
d6968fca
SB
2385 if (core->ops->recalc_rate)
2386 rate = core->ops->recalc_rate(core->hw,
2387 clk_core_get_rate_nolock(core->parent));
2388 else if (core->parent)
2389 rate = core->parent->rate;
b2476490 2390 else
1c8e6004 2391 rate = 0;
d6968fca 2392 core->rate = core->req_rate = rate;
b2476490
MT
2393
2394 /*
2395 * walk the list of orphan clocks and reparent any that are children of
2396 * this clock
2397 */
b67bfe0d 2398 hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
12d29886 2399 if (orphan->num_parents && orphan->ops->get_parent) {
1f61e5f1 2400 i = orphan->ops->get_parent(orphan->hw);
d6968fca
SB
2401 if (!strcmp(core->name, orphan->parent_names[i]))
2402 clk_core_reparent(orphan, core);
1f61e5f1
MF
2403 continue;
2404 }
2405
b2476490 2406 for (i = 0; i < orphan->num_parents; i++)
d6968fca
SB
2407 if (!strcmp(core->name, orphan->parent_names[i])) {
2408 clk_core_reparent(orphan, core);
b2476490
MT
2409 break;
2410 }
1f61e5f1 2411 }
b2476490
MT
2412
2413 /*
2414 * optional platform-specific magic
2415 *
2416 * The .init callback is not used by any of the basic clock types, but
2417 * exists for weird hardware that must perform initialization magic.
2418 * Please consider other ways of solving initialization problems before
24ee1a08 2419 * using this callback, as its use is discouraged.
b2476490 2420 */
d6968fca
SB
2421 if (core->ops->init)
2422 core->ops->init(core->hw);
b2476490 2423
d6968fca 2424 kref_init(&core->ref);
b2476490 2425out:
eab89f69 2426 clk_prepare_unlock();
b2476490 2427
89f7e9de 2428 if (!ret)
d6968fca 2429 clk_debug_register(core);
89f7e9de 2430
d1302a36 2431 return ret;
b2476490
MT
2432}
2433
035a61c3
TV
2434struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
2435 const char *con_id)
0197b3ea 2436{
0197b3ea
SK
2437 struct clk *clk;
2438
035a61c3
TV
2439 /* This is to allow this function to be chained to others */
2440 if (!hw || IS_ERR(hw))
2441 return (struct clk *) hw;
0197b3ea 2442
035a61c3
TV
2443 clk = kzalloc(sizeof(*clk), GFP_KERNEL);
2444 if (!clk)
2445 return ERR_PTR(-ENOMEM);
2446
2447 clk->core = hw->core;
2448 clk->dev_id = dev_id;
2449 clk->con_id = con_id;
1c8e6004
TV
2450 clk->max_rate = ULONG_MAX;
2451
2452 clk_prepare_lock();
50595f8b 2453 hlist_add_head(&clk->clks_node, &hw->core->clks);
1c8e6004 2454 clk_prepare_unlock();
0197b3ea
SK
2455
2456 return clk;
2457}
035a61c3 2458
73e0e496 2459void __clk_free_clk(struct clk *clk)
1c8e6004
TV
2460{
2461 clk_prepare_lock();
50595f8b 2462 hlist_del(&clk->clks_node);
1c8e6004
TV
2463 clk_prepare_unlock();
2464
2465 kfree(clk);
2466}
0197b3ea 2467
293ba3b4
SB
2468/**
2469 * clk_register - allocate a new clock, register it and return an opaque cookie
2470 * @dev: device that is registering this clock
2471 * @hw: link to hardware-specific clock data
2472 *
2473 * clk_register is the primary interface for populating the clock tree with new
2474 * clock nodes. It returns a pointer to the newly allocated struct clk which
a59a5163 2475 * cannot be dereferenced by driver code but may be used in conjunction with the
293ba3b4
SB
2476 * rest of the clock API. In the event of an error clk_register will return an
2477 * error code; drivers must test for an error code after calling clk_register.
2478 */
2479struct clk *clk_register(struct device *dev, struct clk_hw *hw)
b2476490 2480{
d1302a36 2481 int i, ret;
d6968fca 2482 struct clk_core *core;
293ba3b4 2483
d6968fca
SB
2484 core = kzalloc(sizeof(*core), GFP_KERNEL);
2485 if (!core) {
293ba3b4
SB
2486 ret = -ENOMEM;
2487 goto fail_out;
2488 }
b2476490 2489
d6968fca
SB
2490 core->name = kstrdup_const(hw->init->name, GFP_KERNEL);
2491 if (!core->name) {
0197b3ea
SK
2492 ret = -ENOMEM;
2493 goto fail_name;
2494 }
d6968fca 2495 core->ops = hw->init->ops;
ac2df527 2496 if (dev && dev->driver)
d6968fca
SB
2497 core->owner = dev->driver->owner;
2498 core->hw = hw;
2499 core->flags = hw->init->flags;
2500 core->num_parents = hw->init->num_parents;
2501 hw->core = core;
b2476490 2502
d1302a36 2503 /* allocate local copy in case parent_names is __initdata */
d6968fca 2504 core->parent_names = kcalloc(core->num_parents, sizeof(char *),
96a7ed90 2505 GFP_KERNEL);
d1302a36 2506
d6968fca 2507 if (!core->parent_names) {
d1302a36
MT
2508 ret = -ENOMEM;
2509 goto fail_parent_names;
2510 }
2511
2512
2513 /* copy each string name in case parent_names is __initdata */
d6968fca
SB
2514 for (i = 0; i < core->num_parents; i++) {
2515 core->parent_names[i] = kstrdup_const(hw->init->parent_names[i],
0197b3ea 2516 GFP_KERNEL);
d6968fca 2517 if (!core->parent_names[i]) {
d1302a36
MT
2518 ret = -ENOMEM;
2519 goto fail_parent_names_copy;
2520 }
2521 }
2522
d6968fca 2523 INIT_HLIST_HEAD(&core->clks);
1c8e6004 2524
035a61c3
TV
2525 hw->clk = __clk_create_clk(hw, NULL, NULL);
2526 if (IS_ERR(hw->clk)) {
035a61c3
TV
2527 ret = PTR_ERR(hw->clk);
2528 goto fail_parent_names_copy;
2529 }
2530
2531 ret = __clk_init(dev, hw->clk);
d1302a36 2532 if (!ret)
035a61c3 2533 return hw->clk;
b2476490 2534
1c8e6004 2535 __clk_free_clk(hw->clk);
035a61c3 2536 hw->clk = NULL;
b2476490 2537
d1302a36
MT
2538fail_parent_names_copy:
2539 while (--i >= 0)
d6968fca
SB
2540 kfree_const(core->parent_names[i]);
2541 kfree(core->parent_names);
d1302a36 2542fail_parent_names:
d6968fca 2543 kfree_const(core->name);
0197b3ea 2544fail_name:
d6968fca 2545 kfree(core);
d1302a36
MT
2546fail_out:
2547 return ERR_PTR(ret);
b2476490
MT
2548}
2549EXPORT_SYMBOL_GPL(clk_register);
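/*
 * Example (editor's sketch): minimal registration of a gate clock from
 * driver code. All names here ("foo_gate_ops", the parent string, the
 * foo container with an embedded clk_hw) are assumptions for
 * illustration.
 *
 *	static const char *foo_parents[] = { "osc24m" };
 *
 *	struct clk_init_data init = {
 *		.name = "foo_gate",
 *		.ops = &foo_gate_ops,
 *		.parent_names = foo_parents,
 *		.num_parents = ARRAY_SIZE(foo_parents),
 *	};
 *	struct clk *clk;
 *
 *	foo->hw.init = &init;
 *	clk = clk_register(dev, &foo->hw);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 */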
2550
6e5ab41b 2551/* Free memory allocated for a clock. */
fcb0ee6a
SN
2552static void __clk_release(struct kref *ref)
2553{
d6968fca
SB
2554 struct clk_core *core = container_of(ref, struct clk_core, ref);
2555 int i = core->num_parents;
fcb0ee6a 2556
496eadf8
KK
2557 lockdep_assert_held(&prepare_lock);
2558
d6968fca 2559 kfree(core->parents);
fcb0ee6a 2560 while (--i >= 0)
d6968fca 2561 kfree_const(core->parent_names[i]);
fcb0ee6a 2562
d6968fca
SB
2563 kfree(core->parent_names);
2564 kfree_const(core->name);
2565 kfree(core);
fcb0ee6a
SN
2566}
2567
2568/*
2569 * Empty clk_ops for unregistered clocks. These are used temporarily
2570 * after clk_unregister() was called on a clock and until last clock
2571 * consumer calls clk_put() and the struct clk object is freed.
2572 */
2573static int clk_nodrv_prepare_enable(struct clk_hw *hw)
2574{
2575 return -ENXIO;
2576}
2577
2578static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
2579{
2580 WARN_ON_ONCE(1);
2581}
2582
2583static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
2584 unsigned long parent_rate)
2585{
2586 return -ENXIO;
2587}
2588
2589static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
2590{
2591 return -ENXIO;
2592}
2593
2594static const struct clk_ops clk_nodrv_ops = {
2595 .enable = clk_nodrv_prepare_enable,
2596 .disable = clk_nodrv_disable_unprepare,
2597 .prepare = clk_nodrv_prepare_enable,
2598 .unprepare = clk_nodrv_disable_unprepare,
2599 .set_rate = clk_nodrv_set_rate,
2600 .set_parent = clk_nodrv_set_parent,
2601};
2602
1df5c939
MB
2603/**
2604 * clk_unregister - unregister a currently registered clock
2605 * @clk: clock to unregister
1df5c939 2606 */
fcb0ee6a
SN
2607void clk_unregister(struct clk *clk)
2608{
2609 unsigned long flags;
2610
6314b679
SB
2611 if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
2612 return;
2613
035a61c3 2614 clk_debug_unregister(clk->core);
fcb0ee6a
SN
2615
2616 clk_prepare_lock();
2617
035a61c3
TV
2618 if (clk->core->ops == &clk_nodrv_ops) {
2619 pr_err("%s: unregistered clock: %s\n", __func__,
2620 clk->core->name);
6314b679 2621 return;
fcb0ee6a
SN
2622 }
2623 /*
2624 * Assign empty clock ops for consumers that might still hold
2625 * a reference to this clock.
2626 */
2627 flags = clk_enable_lock();
035a61c3 2628 clk->core->ops = &clk_nodrv_ops;
fcb0ee6a
SN
2629 clk_enable_unlock(flags);
2630
035a61c3
TV
2631 if (!hlist_empty(&clk->core->children)) {
2632 struct clk_core *child;
874f224c 2633 struct hlist_node *t;
fcb0ee6a
SN
2634
2635 /* Reparent all children to the orphan list. */
035a61c3
TV
2636 hlist_for_each_entry_safe(child, t, &clk->core->children,
2637 child_node)
2638 clk_core_set_parent(child, NULL);
fcb0ee6a
SN
2639 }
2640
035a61c3 2641 hlist_del_init(&clk->core->child_node);
fcb0ee6a 2642
035a61c3 2643 if (clk->core->prepare_count)
fcb0ee6a 2644 pr_warn("%s: unregistering prepared clock: %s\n",
035a61c3
TV
2645 __func__, clk->core->name);
2646 kref_put(&clk->core->ref, __clk_release);
6314b679 2647
fcb0ee6a
SN
2648 clk_prepare_unlock();
2649}
1df5c939
MB
2650EXPORT_SYMBOL_GPL(clk_unregister);
2651
46c8773a
SB
2652static void devm_clk_release(struct device *dev, void *res)
2653{
293ba3b4 2654 clk_unregister(*(struct clk **)res);
46c8773a
SB
2655}
2656
2657/**
2658 * devm_clk_register - resource managed clk_register()
2659 * @dev: device that is registering this clock
2660 * @hw: link to hardware-specific clock data
2661 *
2662 * Managed clk_register(). Clocks returned from this function are
2663 * automatically clk_unregister()ed on driver detach. See clk_register() for
2664 * more information.
2665 */
2666struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
2667{
2668 struct clk *clk;
293ba3b4 2669 struct clk **clkp;
46c8773a 2670
293ba3b4
SB
2671 clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
2672 if (!clkp)
46c8773a
SB
2673 return ERR_PTR(-ENOMEM);
2674
293ba3b4
SB
2675 clk = clk_register(dev, hw);
2676 if (!IS_ERR(clk)) {
2677 *clkp = clk;
2678 devres_add(dev, clkp);
46c8773a 2679 } else {
293ba3b4 2680 devres_free(clkp);
46c8773a
SB
2681 }
2682
2683 return clk;
2684}
2685EXPORT_SYMBOL_GPL(devm_clk_register);
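/*
 * Example (editor's sketch): the devm_ variant drops the explicit
 * clk_unregister() from the error and remove paths. A probe body
 * fragment, with "foo" and its hw/init fields assumed set up as in the
 * clk_register() sketch above.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct clk *clk;
 *
 *		clk = devm_clk_register(&pdev->dev, &foo->hw);
 *		if (IS_ERR(clk))
 *			return PTR_ERR(clk);
 *		return 0;
 *	}
 */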
2686
2687static int devm_clk_match(struct device *dev, void *res, void *data)
2688{
2689 struct clk *c = res;
2690 if (WARN_ON(!c))
2691 return 0;
2692 return c == data;
2693}
2694
2695/**
2696 * devm_clk_unregister - resource managed clk_unregister()
2697 * @clk: clock to unregister
2698 *
2699 * Deallocate a clock allocated with devm_clk_register(). Normally
2700 * this function will not need to be called and the resource management
2701 * code will ensure that the resource is freed.
2702 */
2703void devm_clk_unregister(struct device *dev, struct clk *clk)
2704{
2705 WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
2706}
2707EXPORT_SYMBOL_GPL(devm_clk_unregister);
2708
ac2df527
SN
2709/*
2710 * clkdev helpers
2711 */
2712int __clk_get(struct clk *clk)
2713{
035a61c3
TV
2714 struct clk_core *core = !clk ? NULL : clk->core;
2715
2716 if (core) {
2717 if (!try_module_get(core->owner))
00efcb1c 2718 return 0;
ac2df527 2719
035a61c3 2720 kref_get(&core->ref);
00efcb1c 2721 }
ac2df527
SN
2722 return 1;
2723}
2724
2725void __clk_put(struct clk *clk)
2726{
10cdfe54
TV
2727 struct module *owner;
2728
00efcb1c 2729 if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
ac2df527
SN
2730 return;
2731
fcb0ee6a 2732 clk_prepare_lock();
1c8e6004 2733
50595f8b 2734 hlist_del(&clk->clks_node);
ec02ace8
TV
2735 if (clk->min_rate > clk->core->req_rate ||
2736 clk->max_rate < clk->core->req_rate)
2737 clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
2738
1c8e6004
TV
2739 owner = clk->core->owner;
2740 kref_put(&clk->core->ref, __clk_release);
2741
fcb0ee6a
SN
2742 clk_prepare_unlock();
2743
10cdfe54 2744 module_put(owner);
035a61c3 2745
035a61c3 2746 kfree(clk);
ac2df527
SN
2747}
2748
b2476490
MT
2749/*** clk rate change notifiers ***/
2750
2751/**
2752 * clk_notifier_register - add a clk rate change notifier
2753 * @clk: struct clk * to watch
2754 * @nb: struct notifier_block * with callback info
2755 *
2756 * Request notification when clk's rate changes. This uses an SRCU
2757 * notifier because we want it to block and notifier unregistrations are
2758 * uncommon. The callbacks associated with the notifier must not
2759 * re-enter into the clk framework by calling any top-level clk APIs;
2760 * this will cause a nested prepare_lock mutex.
2761 *
5324fda7
SB
 2762 * In all notification cases (pre, post and abort rate change) the
2763 * original clock rate is passed to the callback via struct
2764 * clk_notifier_data.old_rate and the new frequency is passed via struct
b2476490
MT
2765 * clk_notifier_data.new_rate.
2766 *
b2476490
MT
2767 * clk_notifier_register() must be called from non-atomic context.
2768 * Returns -EINVAL if called with null arguments, -ENOMEM upon
2769 * allocation failure; otherwise, passes along the return value of
2770 * srcu_notifier_chain_register().
2771 */
2772int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
2773{
2774 struct clk_notifier *cn;
2775 int ret = -ENOMEM;
2776
2777 if (!clk || !nb)
2778 return -EINVAL;
2779
eab89f69 2780 clk_prepare_lock();
b2476490
MT
2781
2782 /* search the list of notifiers for this clk */
2783 list_for_each_entry(cn, &clk_notifier_list, node)
2784 if (cn->clk == clk)
2785 break;
2786
2787 /* if clk wasn't in the notifier list, allocate new clk_notifier */
2788 if (cn->clk != clk) {
2789 cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
2790 if (!cn)
2791 goto out;
2792
2793 cn->clk = clk;
2794 srcu_init_notifier_head(&cn->notifier_head);
2795
2796 list_add(&cn->node, &clk_notifier_list);
2797 }
2798
2799 ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
2800
035a61c3 2801 clk->core->notifier_count++;
b2476490
MT
2802
2803out:
eab89f69 2804 clk_prepare_unlock();
b2476490
MT
2805
2806 return ret;
2807}
2808EXPORT_SYMBOL_GPL(clk_notifier_register);
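/*
 * Example (editor's sketch): a rate-change notifier that vetoes rates
 * above a board limit and otherwise just observes. FOO_MAX_RATE and the
 * clk handle are assumptions; struct clk_notifier_data carries the
 * old_rate/new_rate pair described above.
 *
 *	static int foo_clk_notify(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		if (event == PRE_RATE_CHANGE && ndata->new_rate > FOO_MAX_RATE)
 *			return NOTIFY_BAD;
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_clk_notify,
 *	};
 *
 *	ret = clk_notifier_register(clk, &foo_nb);
 */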
2809
2810/**
2811 * clk_notifier_unregister - remove a clk rate change notifier
2812 * @clk: struct clk *
2813 * @nb: struct notifier_block * with callback info
2814 *
2815 * Request no further notification for changes to 'clk' and frees memory
2816 * allocated in clk_notifier_register.
2817 *
2818 * Returns -EINVAL if called with null arguments; otherwise, passes
2819 * along the return value of srcu_notifier_chain_unregister().
2820 */
2821int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
2822{
2823 struct clk_notifier *cn = NULL;
2824 int ret = -EINVAL;
2825
2826 if (!clk || !nb)
2827 return -EINVAL;
2828
eab89f69 2829 clk_prepare_lock();
b2476490
MT
2830
2831 list_for_each_entry(cn, &clk_notifier_list, node)
2832 if (cn->clk == clk)
2833 break;
2834
2835 if (cn->clk == clk) {
2836 ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
2837
035a61c3 2838 clk->core->notifier_count--;
b2476490
MT
2839
2840 /* XXX the notifier code should handle this better */
2841 if (!cn->notifier_head.head) {
2842 srcu_cleanup_notifier_head(&cn->notifier_head);
72b5322f 2843 list_del(&cn->node);
b2476490
MT
2844 kfree(cn);
2845 }
2846
2847 } else {
2848 ret = -ENOENT;
2849 }
2850
eab89f69 2851 clk_prepare_unlock();
b2476490
MT
2852
2853 return ret;
2854}
2855EXPORT_SYMBOL_GPL(clk_notifier_unregister);
766e6a4e
GL
2856
2857#ifdef CONFIG_OF
2858/**
2859 * struct of_clk_provider - Clock provider registration structure
2860 * @link: Entry in global list of clock providers
2861 * @node: Pointer to device tree node of clock provider
2862 * @get: Get clock callback. Returns NULL or a struct clk for the
2863 * given clock specifier
2864 * @data: context pointer to be passed into @get callback
2865 */
2866struct of_clk_provider {
2867 struct list_head link;
2868
2869 struct device_node *node;
2870 struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
2871 void *data;
2872};
2873
f2f6c255
PG
2874static const struct of_device_id __clk_of_table_sentinel
2875 __used __section(__clk_of_table_end);
2876
766e6a4e 2877static LIST_HEAD(of_clk_providers);
d6782c26
SN
2878static DEFINE_MUTEX(of_clk_mutex);
2879
766e6a4e
GL
2880struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
2881 void *data)
2882{
2883 return data;
2884}
2885EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
2886
494bfec9
SG
2887struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
2888{
2889 struct clk_onecell_data *clk_data = data;
2890 unsigned int idx = clkspec->args[0];
2891
2892 if (idx >= clk_data->clk_num) {
2893 pr_err("%s: invalid clock index %d\n", __func__, idx);
2894 return ERR_PTR(-EINVAL);
2895 }
2896
2897 return clk_data->clks[idx];
2898}
2899EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
2900
766e6a4e
GL
2901/**
2902 * of_clk_add_provider() - Register a clock provider for a node
2903 * @np: Device node pointer associated with clock provider
2904 * @clk_src_get: callback for decoding clock
2905 * @data: context pointer for @clk_src_get callback.
2906 */
2907int of_clk_add_provider(struct device_node *np,
2908 struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
2909 void *data),
2910 void *data)
2911{
2912 struct of_clk_provider *cp;
86be408b 2913 int ret;
766e6a4e
GL
2914
2915 cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
2916 if (!cp)
2917 return -ENOMEM;
2918
2919 cp->node = of_node_get(np);
2920 cp->data = data;
2921 cp->get = clk_src_get;
2922
d6782c26 2923 mutex_lock(&of_clk_mutex);
766e6a4e 2924 list_add(&cp->link, &of_clk_providers);
d6782c26 2925 mutex_unlock(&of_clk_mutex);
766e6a4e
GL
2926 pr_debug("Added clock from %s\n", np->full_name);
2927
86be408b
SN
2928 ret = of_clk_set_defaults(np, true);
2929 if (ret < 0)
2930 of_clk_del_provider(np);
2931
2932 return ret;
766e6a4e
GL
2933}
2934EXPORT_SYMBOL_GPL(of_clk_add_provider);
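/*
 * Example (editor's sketch): registering a multi-output provider with
 * the stock of_clk_src_onecell_get() callback, so a DT reference such as
 * "clocks = <&foo_clks 3>" resolves to foo_clks[3]. Names and the array
 * bound are assumptions.
 *
 *	static struct clk *foo_clks[FOO_NR_CLKS];
 *	static struct clk_onecell_data foo_clk_data = {
 *		.clks = foo_clks,
 *		.clk_num = ARRAY_SIZE(foo_clks),
 *	};
 *
 *	ret = of_clk_add_provider(np, of_clk_src_onecell_get, &foo_clk_data);
 */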
2935
2936/**
2937 * of_clk_del_provider() - Remove a previously registered clock provider
2938 * @np: Device node pointer associated with clock provider
2939 */
2940void of_clk_del_provider(struct device_node *np)
2941{
2942 struct of_clk_provider *cp;
2943
d6782c26 2944 mutex_lock(&of_clk_mutex);
766e6a4e
GL
2945 list_for_each_entry(cp, &of_clk_providers, link) {
2946 if (cp->node == np) {
2947 list_del(&cp->link);
2948 of_node_put(cp->node);
2949 kfree(cp);
2950 break;
2951 }
2952 }
d6782c26 2953 mutex_unlock(&of_clk_mutex);
766e6a4e
GL
2954}
2955EXPORT_SYMBOL_GPL(of_clk_del_provider);
2956
73e0e496
SB
2957struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
2958 const char *dev_id, const char *con_id)
766e6a4e
GL
2959{
2960 struct of_clk_provider *provider;
a34cd466 2961 struct clk *clk = ERR_PTR(-EPROBE_DEFER);
766e6a4e 2962
306c342f
SB
2963 if (!clkspec)
2964 return ERR_PTR(-EINVAL);
2965
766e6a4e 2966 /* Check if we have such a provider in our array */
306c342f 2967 mutex_lock(&of_clk_mutex);
766e6a4e
GL
2968 list_for_each_entry(provider, &of_clk_providers, link) {
2969 if (provider->node == clkspec->np)
2970 clk = provider->get(clkspec, provider->data);
73e0e496
SB
2971 if (!IS_ERR(clk)) {
2972 clk = __clk_create_clk(__clk_get_hw(clk), dev_id,
2973 con_id);
2974
2975 if (!IS_ERR(clk) && !__clk_get(clk)) {
2976 __clk_free_clk(clk);
2977 clk = ERR_PTR(-ENOENT);
2978 }
2979
766e6a4e 2980 break;
73e0e496 2981 }
766e6a4e 2982 }
306c342f 2983 mutex_unlock(&of_clk_mutex);
d6782c26
SN
2984
2985 return clk;
2986}
2987
306c342f
SB
2988/**
2989 * of_clk_get_from_provider() - Lookup a clock from a clock provider
2990 * @clkspec: pointer to a clock specifier data structure
2991 *
2992 * This function looks up a struct clk from the registered list of clock
2993 * providers, an input is a clock specifier data structure as returned
2994 * from the of_parse_phandle_with_args() function call.
2995 */
d6782c26
SN
2996struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
2997{
306c342f 2998 return __of_clk_get_from_provider(clkspec, NULL, __func__);
766e6a4e
GL
2999}
3000
f6102742
MT
3001int of_clk_get_parent_count(struct device_node *np)
3002{
3003 return of_count_phandle_with_args(np, "clocks", "#clock-cells");
3004}
3005EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
3006
766e6a4e
GL
3007const char *of_clk_get_parent_name(struct device_node *np, int index)
3008{
3009 struct of_phandle_args clkspec;
7a0fc1a3 3010 struct property *prop;
766e6a4e 3011 const char *clk_name;
7a0fc1a3
BD
3012 const __be32 *vp;
3013 u32 pv;
766e6a4e 3014 int rc;
7a0fc1a3 3015 int count;
766e6a4e
GL
3016
3017 if (index < 0)
3018 return NULL;
3019
3020 rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
3021 &clkspec);
3022 if (rc)
3023 return NULL;
3024
7a0fc1a3
BD
3025 index = clkspec.args_count ? clkspec.args[0] : 0;
3026 count = 0;
3027
3028 /* if there is an indices property, use it to transfer the index
3029 * specified into an array offset for the clock-output-names property.
3030 */
3031 of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
3032 if (index == pv) {
3033 index = count;
3034 break;
3035 }
3036 count++;
3037 }
3038
766e6a4e 3039 if (of_property_read_string_index(clkspec.np, "clock-output-names",
7a0fc1a3 3040 index,
766e6a4e
GL
3041 &clk_name) < 0)
3042 clk_name = clkspec.np->name;
3043
3044 of_node_put(clkspec.np);
3045 return clk_name;
3046}
3047EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
3048
2e61dfb3
DN
3049/**
3050 * of_clk_parent_fill() - Fill @parents with names of @np's parents and return
3051 * number of parents
3052 * @np: Device node pointer associated with clock provider
3053 * @parents: pointer to char array that hold the parents' names
3054 * @size: size of the @parents array
3055 *
3056 * Return: number of parents for the clock node.
3057 */
3058int of_clk_parent_fill(struct device_node *np, const char **parents,
3059 unsigned int size)
3060{
3061 unsigned int i = 0;
3062
3063 while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL)
3064 i++;
3065
3066 return i;
3067}
3068EXPORT_SYMBOL_GPL(of_clk_parent_fill);
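/*
 * Example (editor's sketch): a provider init callback filling its mux
 * parent list straight from the node's "clocks" property. "foo_parents"
 * and the fixed bound are assumptions.
 *
 *	static const char *foo_parents[4];
 *	int num_parents;
 *
 *	num_parents = of_clk_parent_fill(np, foo_parents,
 *					 ARRAY_SIZE(foo_parents));
 */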
3069
1771b10d
GC
3070struct clock_provider {
3071 of_clk_init_cb_t clk_init_cb;
3072 struct device_node *np;
3073 struct list_head node;
3074};
3075
1771b10d
GC
3076/*
3077 * This function looks for a parent clock. If there is one, then it
3078 * checks that the provider for this parent clock was initialized, in
 3079 * which case the parent clock will be ready.
3080 */
3081static int parent_ready(struct device_node *np)
3082{
3083 int i = 0;
3084
3085 while (true) {
3086 struct clk *clk = of_clk_get(np, i);
3087
 3088 /* this parent is ready, we can check the next one */
3089 if (!IS_ERR(clk)) {
3090 clk_put(clk);
3091 i++;
3092 continue;
3093 }
3094
3095 /* at least one parent is not ready, we exit now */
3096 if (PTR_ERR(clk) == -EPROBE_DEFER)
3097 return 0;
3098
3099 /*
 3100 * Here we make the assumption that the device tree is
 3101 * written correctly. So an error means that there is
 3102 * no more parent. As we didn't exit yet, the
 3103 * previous parents are ready. If there is no clock
 3104 * parent there is no need to wait for them, so we can
 3105 * consider their absence as being ready.
3106 */
3107 return 1;
3108 }
3109}
3110
766e6a4e
GL
3111/**
3112 * of_clk_init() - Scan and init clock providers from the DT
3113 * @matches: array of compatible values and init functions for providers.
3114 *
1771b10d 3115 * This function scans the device tree for matching clock providers
e5ca8fb4 3116 * and calls their initialization functions. It does so while trying
1771b10d 3117 * to follow the dependencies.
766e6a4e
GL
3118 */
3119void __init of_clk_init(const struct of_device_id *matches)
3120{
7f7ed584 3121 const struct of_device_id *match;
766e6a4e 3122 struct device_node *np;
1771b10d
GC
3123 struct clock_provider *clk_provider, *next;
3124 bool is_init_done;
3125 bool force = false;
2573a02a 3126 LIST_HEAD(clk_provider_list);
766e6a4e 3127
f2f6c255 3128 if (!matches)
819b4861 3129 matches = &__clk_of_table;
f2f6c255 3130
1771b10d 3131 /* First prepare the list of the clocks providers */
7f7ed584 3132 for_each_matching_node_and_match(np, matches, &match) {
2e3b19f1
SB
3133 struct clock_provider *parent;
3134
3135 parent = kzalloc(sizeof(*parent), GFP_KERNEL);
3136 if (!parent) {
3137 list_for_each_entry_safe(clk_provider, next,
3138 &clk_provider_list, node) {
3139 list_del(&clk_provider->node);
3140 kfree(clk_provider);
3141 }
3142 return;
3143 }
1771b10d
GC
3144
3145 parent->clk_init_cb = match->data;
3146 parent->np = np;
3f6d439f 3147 list_add_tail(&parent->node, &clk_provider_list);
1771b10d
GC
3148 }
3149
3150 while (!list_empty(&clk_provider_list)) {
3151 is_init_done = false;
3152 list_for_each_entry_safe(clk_provider, next,
3153 &clk_provider_list, node) {
3154 if (force || parent_ready(clk_provider->np)) {
86be408b 3155
1771b10d 3156 clk_provider->clk_init_cb(clk_provider->np);
86be408b
SN
3157 of_clk_set_defaults(clk_provider->np, true);
3158
1771b10d
GC
3159 list_del(&clk_provider->node);
3160 kfree(clk_provider);
3161 is_init_done = true;
3162 }
3163 }
3164
3165 /*
e5ca8fb4 3166 * We didn't manage to initialize any of the
1771b10d
GC
3167 * remaining providers during the last loop, so now we
3168 * initialize all the remaining ones unconditionally
3169 * in case the clock parent was not mandatory
3170 */
3171 if (!is_init_done)
3172 force = true;
766e6a4e
GL
3173 }
3174}
3175#endif