2 * Copyright (C) 2014 Broadcom Corporation
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation version 2.
8 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
9 * kind, whether express or implied; without even the implied warranty
10 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
14 #include <linux/kernel.h>
15 #include <linux/err.h>
16 #include <linux/clk-provider.h>
19 #include <linux/clkdev.h>
20 #include <linux/of_address.h>
21 #include <linux/delay.h>
23 #include "clk-iproc.h"
/* bit positions of the VCO range select bits in the VCO_CTRL low register */
#define PLL_VCO_HIGH_SHIFT 19
#define PLL_VCO_LOW_SHIFT 30

/* number of delay loops waiting for PLL to lock */
#define LOCK_DELAY 100

/* number of VCO frequency bands */
#define NUM_FREQ_BANDS 8

/* number of Kp bands (rows of kp_table) */
#define NUM_KP_BANDS 3

/*
 * Kp band selector, used as the row index into kp_table. Restored here:
 * get_kp() and pll_set_rate() reference these enumerators but the
 * definition was missing from this span.
 */
enum kp_band {
	KP_BAND_MID = 0,
	KP_BAND_HIGH,
	KP_BAND_HIGH_HIGH
};
41 static const unsigned int kp_table
[NUM_KP_BANDS
][NUM_FREQ_BANDS
] = {
42 { 5, 6, 6, 7, 7, 8, 9, 10 },
43 { 4, 4, 5, 5, 6, 7, 8, 9 },
44 { 4, 5, 5, 6, 7, 8, 9, 10 },
47 static const unsigned long ref_freq_table
[NUM_FREQ_BANDS
][2] = {
48 { 10000000, 12500000 },
49 { 12500000, 15000000 },
50 { 15000000, 20000000 },
51 { 20000000, 25000000 },
52 { 25000000, 50000000 },
53 { 50000000, 75000000 },
54 { 75000000, 100000000 },
55 { 100000000, 125000000 },
/*
 * VCO frequency band boundaries in Hz, used by pll_set_rate() to pick the
 * Ki/Kp loop parameters and the VCO range bits. The enum opener and the
 * VCO_LOW enumerator (referenced by pll_set_rate) were missing from this
 * span and are restored here.
 */
enum vco_freq_band {
	VCO_LOW		= 700000000U,	/* NOTE(review): reconstructed lower bound — confirm against HW datasheet */
	VCO_MID		= 1200000000U,
	VCO_HIGH	= 2200000000U,
	VCO_HIGH_HIGH	= 3100000000U,
	VCO_MAX		= 4000000000U,
};
71 struct iproc_pll
*pll
;
73 const struct iproc_clk_ctrl
*ctrl
;
77 void __iomem
*pll_base
;
78 void __iomem
*pwr_base
;
79 void __iomem
*asiu_base
;
81 const struct iproc_pll_ctrl
*ctrl
;
82 const struct iproc_pll_vco_param
*vco_param
;
83 unsigned int num_vco_entries
;
85 struct clk_onecell_data clk_data
;
86 struct iproc_clk
*clks
;
89 #define to_iproc_clk(hw) container_of(hw, struct iproc_clk, hw)
92 * Based on the target frequency, find a match from the VCO frequency parameter
93 * table and return its index
95 static int pll_get_rate_index(struct iproc_pll
*pll
, unsigned int target_rate
)
99 for (i
= 0; i
< pll
->num_vco_entries
; i
++)
100 if (target_rate
== pll
->vco_param
[i
].rate
)
103 if (i
>= pll
->num_vco_entries
)
109 static int get_kp(unsigned long ref_freq
, enum kp_band kp_index
)
113 if (ref_freq
< ref_freq_table
[0][0])
116 for (i
= 0; i
< NUM_FREQ_BANDS
; i
++) {
117 if (ref_freq
>= ref_freq_table
[i
][0] &&
118 ref_freq
< ref_freq_table
[i
][1])
119 return kp_table
[kp_index
][i
];
124 static int pll_wait_for_lock(struct iproc_pll
*pll
)
127 const struct iproc_pll_ctrl
*ctrl
= pll
->ctrl
;
129 for (i
= 0; i
< LOCK_DELAY
; i
++) {
130 u32 val
= readl(pll
->pll_base
+ ctrl
->status
.offset
);
132 if (val
& (1 << ctrl
->status
.shift
))
140 static void __pll_disable(struct iproc_pll
*pll
)
142 const struct iproc_pll_ctrl
*ctrl
= pll
->ctrl
;
145 if (ctrl
->flags
& IPROC_CLK_PLL_ASIU
) {
146 val
= readl(pll
->asiu_base
+ ctrl
->asiu
.offset
);
147 val
&= ~(1 << ctrl
->asiu
.en_shift
);
148 writel(val
, pll
->asiu_base
+ ctrl
->asiu
.offset
);
151 /* latch input value so core power can be shut down */
152 val
= readl(pll
->pwr_base
+ ctrl
->aon
.offset
);
153 val
|= (1 << ctrl
->aon
.iso_shift
);
154 writel(val
, pll
->pwr_base
+ ctrl
->aon
.offset
);
156 /* power down the core */
157 val
&= ~(bit_mask(ctrl
->aon
.pwr_width
) << ctrl
->aon
.pwr_shift
);
158 writel(val
, pll
->pwr_base
+ ctrl
->aon
.offset
);
161 static int __pll_enable(struct iproc_pll
*pll
)
163 const struct iproc_pll_ctrl
*ctrl
= pll
->ctrl
;
166 /* power up the PLL and make sure it's not latched */
167 val
= readl(pll
->pwr_base
+ ctrl
->aon
.offset
);
168 val
|= bit_mask(ctrl
->aon
.pwr_width
) << ctrl
->aon
.pwr_shift
;
169 val
&= ~(1 << ctrl
->aon
.iso_shift
);
170 writel(val
, pll
->pwr_base
+ ctrl
->aon
.offset
);
172 /* certain PLLs also need to be ungated from the ASIU top level */
173 if (ctrl
->flags
& IPROC_CLK_PLL_ASIU
) {
174 val
= readl(pll
->asiu_base
+ ctrl
->asiu
.offset
);
175 val
|= (1 << ctrl
->asiu
.en_shift
);
176 writel(val
, pll
->asiu_base
+ ctrl
->asiu
.offset
);
182 static void __pll_put_in_reset(struct iproc_pll
*pll
)
185 const struct iproc_pll_ctrl
*ctrl
= pll
->ctrl
;
186 const struct iproc_pll_reset_ctrl
*reset
= &ctrl
->reset
;
188 val
= readl(pll
->pll_base
+ reset
->offset
);
189 val
&= ~(1 << reset
->reset_shift
| 1 << reset
->p_reset_shift
);
190 writel(val
, pll
->pll_base
+ reset
->offset
);
191 if (unlikely(ctrl
->flags
& IPROC_CLK_NEEDS_READ_BACK
))
192 readl(pll
->pll_base
+ reset
->offset
);
195 static void __pll_bring_out_reset(struct iproc_pll
*pll
, unsigned int kp
,
196 unsigned int ka
, unsigned int ki
)
199 const struct iproc_pll_ctrl
*ctrl
= pll
->ctrl
;
200 const struct iproc_pll_reset_ctrl
*reset
= &ctrl
->reset
;
202 val
= readl(pll
->pll_base
+ reset
->offset
);
203 val
&= ~(bit_mask(reset
->ki_width
) << reset
->ki_shift
|
204 bit_mask(reset
->kp_width
) << reset
->kp_shift
|
205 bit_mask(reset
->ka_width
) << reset
->ka_shift
);
206 val
|= ki
<< reset
->ki_shift
| kp
<< reset
->kp_shift
|
207 ka
<< reset
->ka_shift
;
208 val
|= 1 << reset
->reset_shift
| 1 << reset
->p_reset_shift
;
209 writel(val
, pll
->pll_base
+ reset
->offset
);
210 if (unlikely(ctrl
->flags
& IPROC_CLK_NEEDS_READ_BACK
))
211 readl(pll
->pll_base
+ reset
->offset
);
214 static int pll_set_rate(struct iproc_clk
*clk
, unsigned int rate_index
,
215 unsigned long parent_rate
)
217 struct iproc_pll
*pll
= clk
->pll
;
218 const struct iproc_pll_vco_param
*vco
= &pll
->vco_param
[rate_index
];
219 const struct iproc_pll_ctrl
*ctrl
= pll
->ctrl
;
220 int ka
= 0, ki
, kp
, ret
;
221 unsigned long rate
= vco
->rate
;
223 enum kp_band kp_index
;
224 unsigned long ref_freq
;
227 * reference frequency = parent frequency / PDIV
228 * If PDIV = 0, then it becomes a multiplier (x2)
231 ref_freq
= parent_rate
* 2;
233 ref_freq
= parent_rate
/ vco
->pdiv
;
235 /* determine Ki and Kp index based on target VCO frequency */
236 if (rate
>= VCO_LOW
&& rate
< VCO_HIGH
) {
238 kp_index
= KP_BAND_MID
;
239 } else if (rate
>= VCO_HIGH
&& rate
&& rate
< VCO_HIGH_HIGH
) {
241 kp_index
= KP_BAND_HIGH
;
242 } else if (rate
>= VCO_HIGH_HIGH
&& rate
< VCO_MAX
) {
244 kp_index
= KP_BAND_HIGH_HIGH
;
246 pr_err("%s: pll: %s has invalid rate: %lu\n", __func__
,
251 kp
= get_kp(ref_freq
, kp_index
);
253 pr_err("%s: pll: %s has invalid kp\n", __func__
, clk
->name
);
257 ret
= __pll_enable(pll
);
259 pr_err("%s: pll: %s fails to enable\n", __func__
, clk
->name
);
263 /* put PLL in reset */
264 __pll_put_in_reset(pll
);
266 writel(0, pll
->pll_base
+ ctrl
->vco_ctrl
.u_offset
);
267 if (unlikely(ctrl
->flags
& IPROC_CLK_NEEDS_READ_BACK
))
268 readl(pll
->pll_base
+ ctrl
->vco_ctrl
.u_offset
);
269 val
= readl(pll
->pll_base
+ ctrl
->vco_ctrl
.l_offset
);
271 if (rate
>= VCO_LOW
&& rate
< VCO_MID
)
272 val
|= (1 << PLL_VCO_LOW_SHIFT
);
275 val
&= ~(1 << PLL_VCO_HIGH_SHIFT
);
277 val
|= (1 << PLL_VCO_HIGH_SHIFT
);
279 writel(val
, pll
->pll_base
+ ctrl
->vco_ctrl
.l_offset
);
280 if (unlikely(ctrl
->flags
& IPROC_CLK_NEEDS_READ_BACK
))
281 readl(pll
->pll_base
+ ctrl
->vco_ctrl
.l_offset
);
283 /* program integer part of NDIV */
284 val
= readl(pll
->pll_base
+ ctrl
->ndiv_int
.offset
);
285 val
&= ~(bit_mask(ctrl
->ndiv_int
.width
) << ctrl
->ndiv_int
.shift
);
286 val
|= vco
->ndiv_int
<< ctrl
->ndiv_int
.shift
;
287 writel(val
, pll
->pll_base
+ ctrl
->ndiv_int
.offset
);
288 if (unlikely(ctrl
->flags
& IPROC_CLK_NEEDS_READ_BACK
))
289 readl(pll
->pll_base
+ ctrl
->ndiv_int
.offset
);
291 /* program fractional part of NDIV */
292 if (ctrl
->flags
& IPROC_CLK_PLL_HAS_NDIV_FRAC
) {
293 val
= readl(pll
->pll_base
+ ctrl
->ndiv_frac
.offset
);
294 val
&= ~(bit_mask(ctrl
->ndiv_frac
.width
) <<
295 ctrl
->ndiv_frac
.shift
);
296 val
|= vco
->ndiv_frac
<< ctrl
->ndiv_frac
.shift
;
297 writel(val
, pll
->pll_base
+ ctrl
->ndiv_frac
.offset
);
298 if (unlikely(ctrl
->flags
& IPROC_CLK_NEEDS_READ_BACK
))
299 readl(pll
->pll_base
+ ctrl
->ndiv_frac
.offset
);
303 val
= readl(pll
->pll_base
+ ctrl
->pdiv
.offset
);
304 val
&= ~(bit_mask(ctrl
->pdiv
.width
) << ctrl
->pdiv
.shift
);
305 val
|= vco
->pdiv
<< ctrl
->pdiv
.shift
;
306 writel(val
, pll
->pll_base
+ ctrl
->pdiv
.offset
);
307 if (unlikely(ctrl
->flags
& IPROC_CLK_NEEDS_READ_BACK
))
308 readl(pll
->pll_base
+ ctrl
->pdiv
.offset
);
310 __pll_bring_out_reset(pll
, kp
, ka
, ki
);
312 ret
= pll_wait_for_lock(pll
);
314 pr_err("%s: pll: %s failed to lock\n", __func__
, clk
->name
);
321 static int iproc_pll_enable(struct clk_hw
*hw
)
323 struct iproc_clk
*clk
= to_iproc_clk(hw
);
324 struct iproc_pll
*pll
= clk
->pll
;
326 return __pll_enable(pll
);
329 static void iproc_pll_disable(struct clk_hw
*hw
)
331 struct iproc_clk
*clk
= to_iproc_clk(hw
);
332 struct iproc_pll
*pll
= clk
->pll
;
333 const struct iproc_pll_ctrl
*ctrl
= pll
->ctrl
;
335 if (ctrl
->flags
& IPROC_CLK_AON
)
341 static unsigned long iproc_pll_recalc_rate(struct clk_hw
*hw
,
342 unsigned long parent_rate
)
344 struct iproc_clk
*clk
= to_iproc_clk(hw
);
345 struct iproc_pll
*pll
= clk
->pll
;
346 const struct iproc_pll_ctrl
*ctrl
= pll
->ctrl
;
349 unsigned int ndiv_int
, ndiv_frac
, pdiv
;
351 if (parent_rate
== 0)
354 /* PLL needs to be locked */
355 val
= readl(pll
->pll_base
+ ctrl
->status
.offset
);
356 if ((val
& (1 << ctrl
->status
.shift
)) == 0) {
362 * PLL output frequency =
364 * ((ndiv_int + ndiv_frac / 2^20) * (parent clock rate / pdiv)
366 val
= readl(pll
->pll_base
+ ctrl
->ndiv_int
.offset
);
367 ndiv_int
= (val
>> ctrl
->ndiv_int
.shift
) &
368 bit_mask(ctrl
->ndiv_int
.width
);
369 ndiv
= (u64
)ndiv_int
<< ctrl
->ndiv_int
.shift
;
371 if (ctrl
->flags
& IPROC_CLK_PLL_HAS_NDIV_FRAC
) {
372 val
= readl(pll
->pll_base
+ ctrl
->ndiv_frac
.offset
);
373 ndiv_frac
= (val
>> ctrl
->ndiv_frac
.shift
) &
374 bit_mask(ctrl
->ndiv_frac
.width
);
377 ndiv
= ((u64
)ndiv_int
<< ctrl
->ndiv_int
.shift
) |
381 val
= readl(pll
->pll_base
+ ctrl
->pdiv
.offset
);
382 pdiv
= (val
>> ctrl
->pdiv
.shift
) & bit_mask(ctrl
->pdiv
.width
);
384 clk
->rate
= (ndiv
* parent_rate
) >> ctrl
->ndiv_int
.shift
;
394 static long iproc_pll_round_rate(struct clk_hw
*hw
, unsigned long rate
,
395 unsigned long *parent_rate
)
398 struct iproc_clk
*clk
= to_iproc_clk(hw
);
399 struct iproc_pll
*pll
= clk
->pll
;
401 if (rate
== 0 || *parent_rate
== 0 || !pll
->vco_param
)
404 for (i
= 0; i
< pll
->num_vco_entries
; i
++) {
405 if (rate
<= pll
->vco_param
[i
].rate
)
409 if (i
== pll
->num_vco_entries
)
412 return pll
->vco_param
[i
].rate
;
415 static int iproc_pll_set_rate(struct clk_hw
*hw
, unsigned long rate
,
416 unsigned long parent_rate
)
418 struct iproc_clk
*clk
= to_iproc_clk(hw
);
419 struct iproc_pll
*pll
= clk
->pll
;
422 rate_index
= pll_get_rate_index(pll
, rate
);
426 ret
= pll_set_rate(clk
, rate_index
, parent_rate
);
430 static const struct clk_ops iproc_pll_ops
= {
431 .enable
= iproc_pll_enable
,
432 .disable
= iproc_pll_disable
,
433 .recalc_rate
= iproc_pll_recalc_rate
,
434 .round_rate
= iproc_pll_round_rate
,
435 .set_rate
= iproc_pll_set_rate
,
438 static int iproc_clk_enable(struct clk_hw
*hw
)
440 struct iproc_clk
*clk
= to_iproc_clk(hw
);
441 const struct iproc_clk_ctrl
*ctrl
= clk
->ctrl
;
442 struct iproc_pll
*pll
= clk
->pll
;
445 /* channel enable is active low */
446 val
= readl(pll
->pll_base
+ ctrl
->enable
.offset
);
447 val
&= ~(1 << ctrl
->enable
.enable_shift
);
448 writel(val
, pll
->pll_base
+ ctrl
->enable
.offset
);
450 /* also make sure channel is not held */
451 val
= readl(pll
->pll_base
+ ctrl
->enable
.offset
);
452 val
&= ~(1 << ctrl
->enable
.hold_shift
);
453 writel(val
, pll
->pll_base
+ ctrl
->enable
.offset
);
454 if (unlikely(ctrl
->flags
& IPROC_CLK_NEEDS_READ_BACK
))
455 readl(pll
->pll_base
+ ctrl
->enable
.offset
);
460 static void iproc_clk_disable(struct clk_hw
*hw
)
462 struct iproc_clk
*clk
= to_iproc_clk(hw
);
463 const struct iproc_clk_ctrl
*ctrl
= clk
->ctrl
;
464 struct iproc_pll
*pll
= clk
->pll
;
467 if (ctrl
->flags
& IPROC_CLK_AON
)
470 val
= readl(pll
->pll_base
+ ctrl
->enable
.offset
);
471 val
|= 1 << ctrl
->enable
.enable_shift
;
472 writel(val
, pll
->pll_base
+ ctrl
->enable
.offset
);
473 if (unlikely(ctrl
->flags
& IPROC_CLK_NEEDS_READ_BACK
))
474 readl(pll
->pll_base
+ ctrl
->enable
.offset
);
477 static unsigned long iproc_clk_recalc_rate(struct clk_hw
*hw
,
478 unsigned long parent_rate
)
480 struct iproc_clk
*clk
= to_iproc_clk(hw
);
481 const struct iproc_clk_ctrl
*ctrl
= clk
->ctrl
;
482 struct iproc_pll
*pll
= clk
->pll
;
486 if (parent_rate
== 0)
489 val
= readl(pll
->pll_base
+ ctrl
->mdiv
.offset
);
490 mdiv
= (val
>> ctrl
->mdiv
.shift
) & bit_mask(ctrl
->mdiv
.width
);
494 clk
->rate
= parent_rate
/ mdiv
;
499 static long iproc_clk_round_rate(struct clk_hw
*hw
, unsigned long rate
,
500 unsigned long *parent_rate
)
504 if (rate
== 0 || *parent_rate
== 0)
507 if (rate
== *parent_rate
)
510 div
= DIV_ROUND_UP(*parent_rate
, rate
);
517 return *parent_rate
/ div
;
520 static int iproc_clk_set_rate(struct clk_hw
*hw
, unsigned long rate
,
521 unsigned long parent_rate
)
523 struct iproc_clk
*clk
= to_iproc_clk(hw
);
524 const struct iproc_clk_ctrl
*ctrl
= clk
->ctrl
;
525 struct iproc_pll
*pll
= clk
->pll
;
529 if (rate
== 0 || parent_rate
== 0)
532 div
= DIV_ROUND_UP(parent_rate
, rate
);
536 val
= readl(pll
->pll_base
+ ctrl
->mdiv
.offset
);
538 val
&= ~(bit_mask(ctrl
->mdiv
.width
) << ctrl
->mdiv
.shift
);
540 val
&= ~(bit_mask(ctrl
->mdiv
.width
) << ctrl
->mdiv
.shift
);
541 val
|= div
<< ctrl
->mdiv
.shift
;
543 writel(val
, pll
->pll_base
+ ctrl
->mdiv
.offset
);
544 if (unlikely(ctrl
->flags
& IPROC_CLK_NEEDS_READ_BACK
))
545 readl(pll
->pll_base
+ ctrl
->mdiv
.offset
);
546 clk
->rate
= parent_rate
/ div
;
551 static const struct clk_ops iproc_clk_ops
= {
552 .enable
= iproc_clk_enable
,
553 .disable
= iproc_clk_disable
,
554 .recalc_rate
= iproc_clk_recalc_rate
,
555 .round_rate
= iproc_clk_round_rate
,
556 .set_rate
= iproc_clk_set_rate
,
560 * Some PLLs require the PLL SW override bit to be set before changes can be
563 static void iproc_pll_sw_cfg(struct iproc_pll
*pll
)
565 const struct iproc_pll_ctrl
*ctrl
= pll
->ctrl
;
567 if (ctrl
->flags
& IPROC_CLK_PLL_NEEDS_SW_CFG
) {
570 val
= readl(pll
->pll_base
+ ctrl
->sw_ctrl
.offset
);
571 val
|= BIT(ctrl
->sw_ctrl
.shift
);
572 writel(val
, pll
->pll_base
+ ctrl
->sw_ctrl
.offset
);
573 if (unlikely(ctrl
->flags
& IPROC_CLK_NEEDS_READ_BACK
))
574 readl(pll
->pll_base
+ ctrl
->sw_ctrl
.offset
);
578 void __init
iproc_pll_clk_setup(struct device_node
*node
,
579 const struct iproc_pll_ctrl
*pll_ctrl
,
580 const struct iproc_pll_vco_param
*vco
,
581 unsigned int num_vco_entries
,
582 const struct iproc_clk_ctrl
*clk_ctrl
,
583 unsigned int num_clks
)
587 struct iproc_pll
*pll
;
588 struct iproc_clk
*iclk
;
589 struct clk_init_data init
;
590 const char *parent_name
;
592 if (WARN_ON(!pll_ctrl
) || WARN_ON(!clk_ctrl
))
595 pll
= kzalloc(sizeof(*pll
), GFP_KERNEL
);
599 pll
->clk_data
.clk_num
= num_clks
;
600 pll
->clk_data
.clks
= kcalloc(num_clks
, sizeof(*pll
->clk_data
.clks
),
602 if (WARN_ON(!pll
->clk_data
.clks
))
605 pll
->clks
= kcalloc(num_clks
, sizeof(*pll
->clks
), GFP_KERNEL
);
606 if (WARN_ON(!pll
->clks
))
609 pll
->pll_base
= of_iomap(node
, 0);
610 if (WARN_ON(!pll
->pll_base
))
613 pll
->pwr_base
= of_iomap(node
, 1);
614 if (WARN_ON(!pll
->pwr_base
))
617 /* some PLLs require gating control at the top ASIU level */
618 if (pll_ctrl
->flags
& IPROC_CLK_PLL_ASIU
) {
619 pll
->asiu_base
= of_iomap(node
, 2);
620 if (WARN_ON(!pll
->asiu_base
))
624 /* initialize and register the PLL itself */
625 pll
->ctrl
= pll_ctrl
;
627 iclk
= &pll
->clks
[0];
629 iclk
->name
= node
->name
;
631 init
.name
= node
->name
;
632 init
.ops
= &iproc_pll_ops
;
634 parent_name
= of_clk_get_parent_name(node
, 0);
635 init
.parent_names
= (parent_name
? &parent_name
: NULL
);
636 init
.num_parents
= (parent_name
? 1 : 0);
637 iclk
->hw
.init
= &init
;
640 pll
->num_vco_entries
= num_vco_entries
;
641 pll
->vco_param
= vco
;
644 iproc_pll_sw_cfg(pll
);
646 clk
= clk_register(NULL
, &iclk
->hw
);
647 if (WARN_ON(IS_ERR(clk
)))
648 goto err_pll_register
;
650 pll
->clk_data
.clks
[0] = clk
;
652 /* now initialize and register all leaf clocks */
653 for (i
= 1; i
< num_clks
; i
++) {
654 const char *clk_name
;
656 memset(&init
, 0, sizeof(init
));
657 parent_name
= node
->name
;
659 ret
= of_property_read_string_index(node
, "clock-output-names",
662 goto err_clk_register
;
664 iclk
= &pll
->clks
[i
];
665 iclk
->name
= clk_name
;
667 iclk
->ctrl
= &clk_ctrl
[i
];
669 init
.name
= clk_name
;
670 init
.ops
= &iproc_clk_ops
;
672 init
.parent_names
= (parent_name
? &parent_name
: NULL
);
673 init
.num_parents
= (parent_name
? 1 : 0);
674 iclk
->hw
.init
= &init
;
676 clk
= clk_register(NULL
, &iclk
->hw
);
677 if (WARN_ON(IS_ERR(clk
)))
678 goto err_clk_register
;
680 pll
->clk_data
.clks
[i
] = clk
;
683 ret
= of_clk_add_provider(node
, of_clk_src_onecell_get
, &pll
->clk_data
);
685 goto err_clk_register
;
690 for (i
= 0; i
< num_clks
; i
++)
691 clk_unregister(pll
->clk_data
.clks
[i
]);
695 iounmap(pll
->asiu_base
);
698 iounmap(pll
->pwr_base
);
701 iounmap(pll
->pll_base
);
707 kfree(pll
->clk_data
.clks
);