clk: samsung: add infrastructure to register cpu clocks
drivers/clk/samsung/clk-cpu.c
/*
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Thomas Abraham <thomas.ab@samsung.com>
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This file contains the utility function to register CPU clock for Samsung
 * Exynos platforms. A CPU clock is defined as a clock supplied to a CPU or a
 * group of CPUs. The CPU clock is typically derived from a hierarchy of clock
 * blocks which includes mux and divider blocks. There are a number of other
 * auxiliary clocks supplied to the CPU domain, such as the debug blocks and
 * the AXI clock for the CPU domain. The rates of these auxiliary clocks are
 * related to the CPU clock rate and this relation is usually specified in the
 * hardware manual of the SoC or supplied after the SoC characterization.
 *
 * The below implementation of the CPU clock allows the rate changes of the CPU
 * clock and the corresponding rate changes of the auxiliary clocks of the CPU
 * domain. The platform clock driver provides a clock register configuration
 * for each configurable rate which is then used to program the clock hardware
 * registers to achieve a fast coordinated rate change for all the CPU domain
 * clocks.
 *
 * On a rate change request for the CPU clock, the rate change is propagated
 * up to the PLL supplying the clock to the CPU domain clock blocks. While the
 * CPU domain PLL is reconfigured, the CPU domain clocks are driven using an
 * alternate clock source. If required, the alternate clock source is divided
 * down in order to keep the output clock rate within the previous OPP limits.
 */
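
/*
 * Illustrative sketch (not part of this driver): a platform clock driver
 * supplies one struct exynos_cpuclk_cfg_data entry per supported rate. The
 * 'prate' field is the parent PLL rate in kHz (the rate-change handlers
 * below compare prate * 1000 against the new rate in Hz), while 'div0' and
 * 'div1' hold pre-computed values for the DIV_CPU0/DIV_CPU1 registers. The
 * table name and the EXAMPLE_DIV_* macros below are placeholders, not
 * validated SoC settings:
 *
 *	static const struct exynos_cpuclk_cfg_data example_armclk_d[] = {
 *		{ 1000000, EXAMPLE_DIV_CPU0_1000MHZ, EXAMPLE_DIV_CPU1_1000MHZ },
 *		{  800000, EXAMPLE_DIV_CPU0_800MHZ,  EXAMPLE_DIV_CPU1_800MHZ  },
 *		{       0 },
 *	};
 *
 * The terminating entry with prate == 0 is required: the rate-change
 * handlers below return -EINVAL when they reach it without finding a match.
 */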

#include <linux/errno.h>
#include "clk-cpu.h"

#define E4210_SRC_CPU		0x0
#define E4210_STAT_CPU		0x200
#define E4210_DIV_CPU0		0x300
#define E4210_DIV_CPU1		0x304
#define E4210_DIV_STAT_CPU0	0x400
#define E4210_DIV_STAT_CPU1	0x404

#define E4210_DIV0_RATIO0_MASK	0x7
#define E4210_DIV1_HPM_MASK	(0x7 << 4)
#define E4210_DIV1_COPY_MASK	(0x7 << 0)
#define E4210_MUX_HPM_MASK	(1 << 20)
#define E4210_DIV0_ATB_SHIFT	16
#define E4210_DIV0_ATB_MASK	(DIV_MASK << E4210_DIV0_ATB_SHIFT)

#define MAX_DIV			8
#define DIV_MASK		7
#define DIV_MASK_ALL		0xffffffff
#define MUX_MASK		7

/*
 * Helper function to wait until divider(s) have stabilized after the divider
 * value has changed.
 */
static void wait_until_divider_stable(void __iomem *div_reg, unsigned long mask)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(10);

	do {
		if (!(readl(div_reg) & mask))
			return;
	} while (time_before(jiffies, timeout));

	/* final check in case this thread was scheduled out past the timeout */
	if (!(readl(div_reg) & mask))
		return;

	pr_err("%s: timeout in divider stabilization\n", __func__);
}

/*
 * Helper function to wait until the mux has stabilized after the mux selection
 * value was changed.
 */
static void wait_until_mux_stable(void __iomem *mux_reg, u32 mux_pos,
					unsigned long mux_value)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(10);

	do {
		if (((readl(mux_reg) >> mux_pos) & MUX_MASK) == mux_value)
			return;
	} while (time_before(jiffies, timeout));

	if (((readl(mux_reg) >> mux_pos) & MUX_MASK) == mux_value)
		return;

	pr_err("%s: re-parenting mux timed out\n", __func__);
}

/* common round_rate callback usable for all types of CPU clocks */
static long exynos_cpuclk_round_rate(struct clk_hw *hw,
			unsigned long drate, unsigned long *prate)
{
	struct clk *parent = __clk_get_parent(hw->clk);

	*prate = __clk_round_rate(parent, drate);
	return *prate;
}

/* common recalc_rate callback usable for all types of CPU clocks */
static unsigned long exynos_cpuclk_recalc_rate(struct clk_hw *hw,
			unsigned long parent_rate)
{
	/*
	 * The CPU clock output (armclk) rate is the same as its parent
	 * rate. Although there exist certain dividers inside the CPU
	 * clock block that could be used to divide the parent clock,
	 * the driver does not make use of them currently, except during
	 * frequency transitions.
	 */
	return parent_rate;
}

static const struct clk_ops exynos_cpuclk_clk_ops = {
	.recalc_rate = exynos_cpuclk_recalc_rate,
	.round_rate = exynos_cpuclk_round_rate,
};
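
/*
 * Note that exynos_cpuclk_clk_ops intentionally has no .set_rate callback:
 * the CPU clock is registered with CLK_SET_RATE_PARENT (see
 * exynos_register_cpu_clock() below), so a clk_set_rate() on it is forwarded
 * to the parent PLL, and the notifier handlers below perform the safe
 * re-parenting and divider setup around that parent rate change.
 */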

/*
 * Helper function to set the 'safe' dividers for the CPU clock. The parameters
 * div and mask contain the divider value and the register bit mask of the
 * dividers to be programmed.
 */
static void exynos_set_safe_div(void __iomem *base, unsigned long div,
					unsigned long mask)
{
	unsigned long div0;

	div0 = readl(base + E4210_DIV_CPU0);
	div0 = (div0 & ~mask) | (div & mask);
	writel(div0, base + E4210_DIV_CPU0);
	wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, mask);
}

/* handler for pre-rate change notification from parent clock */
static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
			struct exynos_cpuclk *cpuclk, void __iomem *base)
{
	const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
	unsigned long alt_prate = clk_get_rate(cpuclk->alt_parent);
	unsigned long alt_div = 0, alt_div_mask = DIV_MASK;
	unsigned long div0, div1 = 0, mux_reg;

	/* find out the divider values to use for clock data */
	while ((cfg_data->prate * 1000) != ndata->new_rate) {
		if (cfg_data->prate == 0)
			return -EINVAL;
		cfg_data++;
	}

	spin_lock(cpuclk->lock);

	/*
	 * For the selected PLL clock frequency, get the pre-defined divider
	 * values. If the clock for sclk_hpm is not sourced from apll, then
	 * the values for DIV_COPY and DIV_HPM dividers need not be set.
	 */
	div0 = cfg_data->div0;
	if (test_bit(CLK_CPU_HAS_DIV1, &cpuclk->flags)) {
		div1 = cfg_data->div1;
		if (readl(base + E4210_SRC_CPU) & E4210_MUX_HPM_MASK)
			div1 = readl(base + E4210_DIV_CPU1) &
				(E4210_DIV1_HPM_MASK | E4210_DIV1_COPY_MASK);
	}

	/*
	 * If the old parent clock speed is less than the clock speed of
	 * the alternate parent, then it should be ensured that at no point
	 * the armclk speed exceeds the old parent rate until the dividers
	 * are set. Also work around the issue of the dividers being set to
	 * lower values before the parent clock speed is set to the new lower
	 * speed (this can result in the armclk output clocks running too
	 * fast).
	 */
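	/*
	 * Worked example (illustrative numbers only): with an alternate
	 * parent running at 800 MHz and a transition from 500 MHz down to
	 * 300 MHz, tmp_rate is 300 MHz, so alt_div becomes
	 * DIV_ROUND_UP(800, 300) - 1 = 2 (i.e. divide by 3), keeping the
	 * armclk at about 267 MHz while the PLL is being reprogrammed.
	 */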
	if (alt_prate > ndata->old_rate || ndata->old_rate > ndata->new_rate) {
		unsigned long tmp_rate = min(ndata->old_rate, ndata->new_rate);

		alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1;
		WARN_ON(alt_div >= MAX_DIV);

		if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) {
			/*
			 * In Exynos4210, the ATB clock parent is also
			 * mout_core. So the ATB clock also needs to be
			 * maintained at a safe speed.
			 */
			alt_div |= E4210_DIV0_ATB_MASK;
			alt_div_mask |= E4210_DIV0_ATB_MASK;
		}
		exynos_set_safe_div(base, alt_div, alt_div_mask);
		div0 |= alt_div;
	}

	/* select sclk_mpll as the alternate parent */
	mux_reg = readl(base + E4210_SRC_CPU);
	writel(mux_reg | (1 << 16), base + E4210_SRC_CPU);
	wait_until_mux_stable(base + E4210_STAT_CPU, 16, 2);

	/* alternate parent is active now, set the dividers */
	writel(div0, base + E4210_DIV_CPU0);
	wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, DIV_MASK_ALL);

	if (test_bit(CLK_CPU_HAS_DIV1, &cpuclk->flags)) {
		writel(div1, base + E4210_DIV_CPU1);
		wait_until_divider_stable(base + E4210_DIV_STAT_CPU1,
						DIV_MASK_ALL);
	}

	spin_unlock(cpuclk->lock);
	return 0;
}

/* handler for post-rate change notification from parent clock */
static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
			struct exynos_cpuclk *cpuclk, void __iomem *base)
{
	const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
	unsigned long div = 0, div_mask = DIV_MASK;
	unsigned long mux_reg;

	/* find out the divider values to use for clock data */
	if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) {
		while ((cfg_data->prate * 1000) != ndata->new_rate) {
			if (cfg_data->prate == 0)
				return -EINVAL;
			cfg_data++;
		}
	}

	spin_lock(cpuclk->lock);

	/* select mout_apll as the parent clock again */
	mux_reg = readl(base + E4210_SRC_CPU);
	writel(mux_reg & ~(1 << 16), base + E4210_SRC_CPU);
	wait_until_mux_stable(base + E4210_STAT_CPU, 16, 1);

	if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) {
		div |= (cfg_data->div0 & E4210_DIV0_ATB_MASK);
		div_mask |= E4210_DIV0_ATB_MASK;
	}

	exynos_set_safe_div(base, div, div_mask);
	spin_unlock(cpuclk->lock);
	return 0;
}

/*
 * This notifier function is called for the pre-rate and post-rate change
 * notifications of the parent clock of cpuclk.
 */
static int exynos_cpuclk_notifier_cb(struct notifier_block *nb,
				unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;
	struct exynos_cpuclk *cpuclk;
	void __iomem *base;
	int err = 0;

	cpuclk = container_of(nb, struct exynos_cpuclk, clk_nb);
	base = cpuclk->ctrl_base;

	if (event == PRE_RATE_CHANGE)
		err = exynos_cpuclk_pre_rate_change(ndata, cpuclk, base);
	else if (event == POST_RATE_CHANGE)
		err = exynos_cpuclk_post_rate_change(ndata, cpuclk, base);

	return notifier_from_errno(err);
}

/* helper function to register a CPU clock */
int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
		unsigned int lookup_id, const char *name, const char *parent,
		const char *alt_parent, unsigned long offset,
		const struct exynos_cpuclk_cfg_data *cfg,
		unsigned long num_cfgs, unsigned long flags)
{
	struct exynos_cpuclk *cpuclk;
	struct clk_init_data init;
	struct clk *clk;
	int ret = 0;

	cpuclk = kzalloc(sizeof(*cpuclk), GFP_KERNEL);
	if (!cpuclk)
		return -ENOMEM;

	init.name = name;
	init.flags = CLK_SET_RATE_PARENT;
	init.parent_names = &parent;
	init.num_parents = 1;
	init.ops = &exynos_cpuclk_clk_ops;

	cpuclk->hw.init = &init;
	cpuclk->ctrl_base = ctx->reg_base + offset;
	cpuclk->lock = &ctx->lock;
	cpuclk->flags = flags;
	cpuclk->clk_nb.notifier_call = exynos_cpuclk_notifier_cb;

	cpuclk->alt_parent = __clk_lookup(alt_parent);
	if (!cpuclk->alt_parent) {
		pr_err("%s: could not lookup alternate parent %s\n",
				__func__, alt_parent);
		ret = -EINVAL;
		goto free_cpuclk;
	}

	clk = __clk_lookup(parent);
	if (!clk) {
		pr_err("%s: could not lookup parent clock %s\n",
				__func__, parent);
		ret = -EINVAL;
		goto free_cpuclk;
	}

	ret = clk_notifier_register(clk, &cpuclk->clk_nb);
	if (ret) {
		pr_err("%s: failed to register clock notifier for %s\n",
				__func__, name);
		goto free_cpuclk;
	}

	cpuclk->cfg = kmemdup(cfg, sizeof(*cfg) * num_cfgs, GFP_KERNEL);
	if (!cpuclk->cfg) {
		pr_err("%s: could not allocate memory for cpuclk data\n",
				__func__);
		ret = -ENOMEM;
		goto unregister_clk_nb;
	}

	clk = clk_register(NULL, &cpuclk->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: could not register cpuclk %s\n", __func__, name);
		ret = PTR_ERR(clk);
		goto free_cpuclk_data;
	}

	samsung_clk_add_lookup(ctx, clk, lookup_id);
	return 0;

free_cpuclk_data:
	kfree(cpuclk->cfg);
unregister_clk_nb:
	clk_notifier_unregister(__clk_lookup(parent), &cpuclk->clk_nb);
free_cpuclk:
	kfree(cpuclk);
	return ret;
}
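
/*
 * Usage sketch (illustrative, not part of this file): an Exynos platform
 * clock driver would call the helper above from its clock provider init
 * code, after the parent and alternate-parent clocks have been registered.
 * The lookup ID, CMU offset and configuration table below are placeholders
 * rather than values taken from a real SoC driver; "mout_apll" and
 * "sclk_mpll" match the parent/alternate-parent naming used in the comments
 * above:
 *
 *	exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
 *			"mout_apll", "sclk_mpll", 0x14200,
 *			example_armclk_d, ARRAY_SIZE(example_armclk_d),
 *			CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1);
 *
 * On success the new clock is added to the provider's lookup table under the
 * given ID, and subsequent clk_set_rate() calls on it propagate to the parent
 * PLL while the notifier handlers above keep the CPU domain clocks safe.
 */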