Commit | Line | Data |
---|---|---|
ab8ba01b GC |
1 | /* |
2 | * Marvell MVEBU CPU clock handling. | |
3 | * | |
4 | * Copyright (C) 2012 Marvell | |
5 | * | |
6 | * Gregory CLEMENT <gregory.clement@free-electrons.com> | |
7 | * | |
8 | * This file is licensed under the terms of the GNU General Public | |
9 | * License version 2. This program is licensed "as is" without any | |
10 | * warranty of any kind, whether express or implied. | |
11 | */ | |
12 | #include <linux/kernel.h> | |
db00c3e5 SB |
13 | #include <linux/slab.h> |
14 | #include <linux/clk.h> | |
ab8ba01b GC |
15 | #include <linux/clk-provider.h> |
16 | #include <linux/of_address.h> | |
17 | #include <linux/io.h> | |
18 | #include <linux/of.h> | |
19 | #include <linux/delay.h> | |
ee2d8ea1 TP |
20 | #include <linux/mvebu-pmsu.h> |
21 | #include <asm/smp_plat.h> | |
ab8ba01b | 22 | |
ee2d8ea1 TP |
/*
 * System-control clock-divider registers. The VALUE register holds one
 * 8-bit divider field per CPU (only the low 6 bits are used, see
 * SYS_CTRL_CLK_DIVIDER_MASK); the CTRL registers carry the reload/reset
 * request bits used when reprogramming a divider.
 */
#define SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET		0x0
#define SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL		0xff
#define SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT		8
#define SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET		0x8
#define SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT	16
#define SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET		0xC
#define SYS_CTRL_CLK_DIVIDER_MASK			0x3F

/* Per-CPU PMU DFS register: ratio field used for dynamic freq scaling. */
#define PMU_DFS_RATIO_SHIFT	16
#define PMU_DFS_RATIO_MASK	0x3F

#define MAX_CPU 4
/* One instance per CPU described in the Device Tree. */
struct cpu_clk {
	struct clk_hw hw;		/* common clock framework handle */
	int cpu;			/* CPU index (DT "reg" property) */
	const char *clk_name;		/* "cpu<N>", owned by this struct */
	const char *parent_name;	/* parent clock from DT */
	void __iomem *reg_base;		/* clock-complex register window */
	void __iomem *pmu_dfs;		/* per-CPU PMU DFS reg, NULL if unmapped */
};

/* Array of registered CPU clocks, exported via the onecell provider. */
static struct clk **clks;

static struct clk_onecell_data clk_data;

#define to_cpu_clk(p) container_of(p, struct cpu_clk, hw)
49 | ||
50 | static unsigned long clk_cpu_recalc_rate(struct clk_hw *hwclk, | |
51 | unsigned long parent_rate) | |
52 | { | |
53 | struct cpu_clk *cpuclk = to_cpu_clk(hwclk); | |
54 | u32 reg, div; | |
55 | ||
56 | reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET); | |
57 | div = (reg >> (cpuclk->cpu * 8)) & SYS_CTRL_CLK_DIVIDER_MASK; | |
58 | return parent_rate / div; | |
59 | } | |
60 | ||
61 | static long clk_cpu_round_rate(struct clk_hw *hwclk, unsigned long rate, | |
62 | unsigned long *parent_rate) | |
63 | { | |
64 | /* Valid ratio are 1:1, 1:2 and 1:3 */ | |
65 | u32 div; | |
66 | ||
67 | div = *parent_rate / rate; | |
68 | if (div == 0) | |
69 | div = 1; | |
70 | else if (div > 3) | |
71 | div = 3; | |
72 | ||
73 | return *parent_rate / div; | |
74 | } | |
75 | ||
ee2d8ea1 TP |
/*
 * Program a new CPU clock divider directly through the clock complex.
 * Used when the CPU clock is not running (see clk_cpu_set_rate).
 *
 * Sequence (order matters): write the new divider field, request a
 * "smooth" reload for this CPU, trigger the global update, then wait
 * for the clocks to settle before clearing the request bits.
 *
 * Assumes rate was validated by clk_cpu_round_rate, so the computed
 * divider is in the 1..3 range. Always returns 0.
 */
static int clk_cpu_off_set_rate(struct clk_hw *hwclk, unsigned long rate,
				unsigned long parent_rate)
{
	struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
	u32 reg, div;
	u32 reload_mask;

	div = parent_rate / rate;
	/* Replace only this CPU's 8-bit divider field, keep the others. */
	reg = (readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET)
		& (~(SYS_CTRL_CLK_DIVIDER_MASK << (cpuclk->cpu * 8))))
		| (div << (cpuclk->cpu * 8));
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
	/* Set clock divider reload smooth bit mask */
	reload_mask = 1 << (20 + cpuclk->cpu);

	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
	    | reload_mask;
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	/* Now trigger the clock update */
	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
	    | 1 << 24;
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	/* Wait for clocks to settle down then clear reload request */
	udelay(1000);
	reg &= ~(reload_mask | 1 << 24);
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
	udelay(1000);

	return 0;
}
109 | ||
ee2d8ea1 TP |
/*
 * Change the CPU frequency while the CPU clock is running, by going
 * through the PMU Dynamic Frequency Scaling mechanism: program the
 * target ratio into the per-CPU PMU DFS register, arm the divider
 * reset bits, then hand off to the PMSU to perform the switch.
 *
 * Only transitions between the full fabric-derived rate and half of it
 * are handled: doubling the current rate halves the fabric divider,
 * anything else falls back to the full fabric divider.
 *
 * Returns -ENODEV when the PMU DFS registers were not mapped, otherwise
 * the result of mvebu_pmsu_dfs_request().
 */
static int clk_cpu_on_set_rate(struct clk_hw *hwclk, unsigned long rate,
			       unsigned long parent_rate)
{
	u32 reg;
	unsigned long fabric_div, target_div, cur_rate;
	struct cpu_clk *cpuclk = to_cpu_clk(hwclk);

	/*
	 * PMU DFS registers are not mapped, Device Tree does not
	 * describe them. We cannot change the frequency dynamically.
	 */
	if (!cpuclk->pmu_dfs)
		return -ENODEV;

	cur_rate = clk_hw_get_rate(hwclk);

	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET);
	fabric_div = (reg >> SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT) &
		SYS_CTRL_CLK_DIVIDER_MASK;

	/* Frequency is going up */
	if (rate == 2 * cur_rate)
		target_div = fabric_div / 2;
	/* Frequency is going down */
	else
		target_div = fabric_div;

	if (target_div == 0)
		target_div = 1;

	/* Program the target ratio into the per-CPU PMU DFS register. */
	reg = readl(cpuclk->pmu_dfs);
	reg &= ~(PMU_DFS_RATIO_MASK << PMU_DFS_RATIO_SHIFT);
	reg |= (target_div << PMU_DFS_RATIO_SHIFT);
	writel(reg, cpuclk->pmu_dfs);

	/* Arm the divider reset bits before requesting the DFS switch. */
	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
	reg |= (SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL <<
		SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT);
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	return mvebu_pmsu_dfs_request(cpuclk->cpu);
}
152 | ||
153 | static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate, | |
154 | unsigned long parent_rate) | |
155 | { | |
156 | if (__clk_is_enabled(hwclk->clk)) | |
157 | return clk_cpu_on_set_rate(hwclk, rate, parent_rate); | |
158 | else | |
159 | return clk_cpu_off_set_rate(hwclk, rate, parent_rate); | |
160 | } | |
161 | ||
ab8ba01b GC |
/* clk_ops for the per-CPU clocks: rate control only, no gating. */
static const struct clk_ops cpu_ops = {
	.recalc_rate = clk_cpu_recalc_rate,
	.round_rate = clk_cpu_round_rate,
	.set_rate = clk_cpu_set_rate,
};
167 | ||
9ac81751 | 168 | static void __init of_cpu_clk_setup(struct device_node *node) |
ab8ba01b GC |
169 | { |
170 | struct cpu_clk *cpuclk; | |
171 | void __iomem *clock_complex_base = of_iomap(node, 0); | |
ee2d8ea1 | 172 | void __iomem *pmu_dfs_base = of_iomap(node, 1); |
ab8ba01b GC |
173 | int ncpus = 0; |
174 | struct device_node *dn; | |
175 | ||
176 | if (clock_complex_base == NULL) { | |
177 | pr_err("%s: clock-complex base register not set\n", | |
178 | __func__); | |
179 | return; | |
180 | } | |
181 | ||
ee2d8ea1 TP |
182 | if (pmu_dfs_base == NULL) |
183 | pr_warn("%s: pmu-dfs base register not set, dynamic frequency scaling not available\n", | |
184 | __func__); | |
185 | ||
ab8ba01b GC |
186 | for_each_node_by_type(dn, "cpu") |
187 | ncpus++; | |
188 | ||
189 | cpuclk = kzalloc(ncpus * sizeof(*cpuclk), GFP_KERNEL); | |
190 | if (WARN_ON(!cpuclk)) | |
f98d007d | 191 | goto cpuclk_out; |
ab8ba01b GC |
192 | |
193 | clks = kzalloc(ncpus * sizeof(*clks), GFP_KERNEL); | |
194 | if (WARN_ON(!clks)) | |
d6f620a4 | 195 | goto clks_out; |
ab8ba01b GC |
196 | |
197 | for_each_node_by_type(dn, "cpu") { | |
198 | struct clk_init_data init; | |
199 | struct clk *clk; | |
ab8ba01b GC |
200 | char *clk_name = kzalloc(5, GFP_KERNEL); |
201 | int cpu, err; | |
202 | ||
203 | if (WARN_ON(!clk_name)) | |
d6f620a4 | 204 | goto bail_out; |
ab8ba01b GC |
205 | |
206 | err = of_property_read_u32(dn, "reg", &cpu); | |
207 | if (WARN_ON(err)) | |
d6f620a4 | 208 | goto bail_out; |
ab8ba01b GC |
209 | |
210 | sprintf(clk_name, "cpu%d", cpu); | |
ab8ba01b | 211 | |
61e22fff | 212 | cpuclk[cpu].parent_name = of_clk_get_parent_name(node, 0); |
ab8ba01b GC |
213 | cpuclk[cpu].clk_name = clk_name; |
214 | cpuclk[cpu].cpu = cpu; | |
215 | cpuclk[cpu].reg_base = clock_complex_base; | |
ee2d8ea1 TP |
216 | if (pmu_dfs_base) |
217 | cpuclk[cpu].pmu_dfs = pmu_dfs_base + 4 * cpu; | |
ab8ba01b GC |
218 | cpuclk[cpu].hw.init = &init; |
219 | ||
220 | init.name = cpuclk[cpu].clk_name; | |
221 | init.ops = &cpu_ops; | |
222 | init.flags = 0; | |
223 | init.parent_names = &cpuclk[cpu].parent_name; | |
224 | init.num_parents = 1; | |
225 | ||
226 | clk = clk_register(NULL, &cpuclk[cpu].hw); | |
227 | if (WARN_ON(IS_ERR(clk))) | |
228 | goto bail_out; | |
229 | clks[cpu] = clk; | |
230 | } | |
231 | clk_data.clk_num = MAX_CPU; | |
232 | clk_data.clks = clks; | |
233 | of_clk_add_provider(node, of_clk_src_onecell_get, &clk_data); | |
234 | ||
235 | return; | |
236 | bail_out: | |
237 | kfree(clks); | |
d6f620a4 CD |
238 | while(ncpus--) |
239 | kfree(cpuclk[ncpus].clk_name); | |
240 | clks_out: | |
ab8ba01b | 241 | kfree(cpuclk); |
f98d007d JZ |
242 | cpuclk_out: |
243 | iounmap(clock_complex_base); | |
ab8ba01b GC |
244 | } |
245 | ||
f640c0fa JFM |
/* Register the setup hook for matching DT nodes at early boot. */
CLK_OF_DECLARE(armada_xp_cpu_clock, "marvell,armada-xp-cpu-clock",
	       of_cpu_clk_setup);