Commit | Line | Data |
---|---|---|
7444dad2 GC |
1 | /* |
2 | * Power Management Service Unit(PMSU) support for Armada 370/XP platforms. | |
3 | * | |
4 | * Copyright (C) 2012 Marvell | |
5 | * | |
6 | * Yehuda Yitschak <yehuday@marvell.com> | |
7 | * Gregory Clement <gregory.clement@free-electrons.com> | |
8 | * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> | |
9 | * | |
10 | * This file is licensed under the terms of the GNU General Public | |
11 | * License version 2. This program is licensed "as is" without any | |
12 | * warranty of any kind, whether express or implied. | |
13 | * | |
14 | * The Armada 370 and Armada XP SOCs have a power management service | |
15 | * unit which is responsible for powering down and waking up CPUs and | |
16 | * other SOC units | |
17 | */ | |
18 | ||
bd045a1e TP |
19 | #define pr_fmt(fmt) "mvebu-pmsu: " fmt |
20 | ||
a509ea84 | 21 | #include <linux/clk.h> |
d163ee16 | 22 | #include <linux/cpu_pm.h> |
a509ea84 | 23 | #include <linux/delay.h> |
7444dad2 | 24 | #include <linux/init.h> |
7444dad2 | 25 | #include <linux/io.h> |
3e328428 | 26 | #include <linux/kernel.h> |
3076cc58 | 27 | #include <linux/mbus.h> |
7444dad2 | 28 | #include <linux/of_address.h> |
a509ea84 | 29 | #include <linux/of_device.h> |
8c16babc | 30 | #include <linux/platform_device.h> |
a509ea84 | 31 | #include <linux/pm_opp.h> |
49754ffe | 32 | #include <linux/resource.h> |
a509ea84 | 33 | #include <linux/slab.h> |
3e328428 | 34 | #include <linux/smp.h> |
c3e04cab GC |
35 | #include <asm/cacheflush.h> |
36 | #include <asm/cp15.h> | |
e53b1fd4 | 37 | #include <asm/smp_scu.h> |
7444dad2 | 38 | #include <asm/smp_plat.h> |
c3e04cab GC |
39 | #include <asm/suspend.h> |
40 | #include <asm/tlbflush.h> | |
49754ffe | 41 | #include "common.h" |
7444dad2 | 42 | |
7444dad2 | 43 | |
0c3acc74 GC |
#define PMSU_BASE_OFFSET    0x100
#define PMSU_REG_SIZE	    0x1000

/*
 * PMSU MP registers.
 *
 * The per-CPU register macros parenthesize their 'cpu' argument so
 * that expressions such as FOO(a + b) expand correctly (previously
 * (cpu * 0x100) would mis-expand for non-trivial arguments).
 */
#define PMSU_CONTROL_AND_CONFIG(cpu)	    (((cpu) * 0x100) + 0x104)
#define PMSU_CONTROL_AND_CONFIG_DFS_REQ		BIT(18)
#define PMSU_CONTROL_AND_CONFIG_PWDDN_REQ	BIT(16)
#define PMSU_CONTROL_AND_CONFIG_L2_PWDDN	BIT(20)

#define PMSU_CPU_POWER_DOWN_CONTROL(cpu)    (((cpu) * 0x100) + 0x108)

#define PMSU_CPU_POWER_DOWN_DIS_SNP_Q_SKIP	BIT(0)

#define PMSU_STATUS_AND_MASK(cpu)	    (((cpu) * 0x100) + 0x10c)
#define PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT	BIT(16)
#define PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT	BIT(17)
#define PMSU_STATUS_AND_MASK_IRQ_WAKEUP		BIT(20)
#define PMSU_STATUS_AND_MASK_FIQ_WAKEUP		BIT(21)
#define PMSU_STATUS_AND_MASK_DBG_WAKEUP		BIT(22)
#define PMSU_STATUS_AND_MASK_IRQ_MASK		BIT(24)
#define PMSU_STATUS_AND_MASK_FIQ_MASK		BIT(25)

#define PMSU_EVENT_STATUS_AND_MASK(cpu)     (((cpu) * 0x100) + 0x120)
#define PMSU_EVENT_STATUS_AND_MASK_DFS_DONE        BIT(1)
#define PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK   BIT(17)

#define PMSU_BOOT_ADDR_REDIRECT_OFFSET(cpu) (((cpu) * 0x100) + 0x124)

/* PMSU fabric registers */
#define L2C_NFABRIC_PM_CTL		    0x4
#define L2C_NFABRIC_PM_CTL_PWR_DOWN		BIT(20)

/* PMSU delay registers */
#define PMSU_POWERDOWN_DELAY		    0xF04
#define PMSU_POWERDOWN_DELAY_PMU	    BIT(1)
#define PMSU_POWERDOWN_DELAY_MASK	    0xFFFE
#define PMSU_DFLT_ARMADA38X_DELAY	    0x64

/* CA9 MPcore SoC Control registers */

#define MPCORE_RESET_CTL		    0x64
#define MPCORE_RESET_CTL_L2		    BIT(0)
#define MPCORE_RESET_CTL_DEBUG		    BIT(16)

#define SRAM_PHYS_BASE  0xFFFF0000
#define BOOTROM_BASE    0xFFF00000
#define BOOTROM_SIZE    0x100000

#define ARMADA_370_CRYPT0_ENG_TARGET   0x9
#define ARMADA_370_CRYPT0_ENG_ATTR     0x1
c3e04cab GC |
95 | extern void ll_disable_coherency(void); |
96 | extern void ll_enable_coherency(void); | |
97 | ||
6509dc74 | 98 | extern void armada_370_xp_cpu_resume(void); |
e53b1fd4 GC |
99 | extern void armada_38x_cpu_resume(void); |
100 | ||
3b9e4b14 GC |
101 | static phys_addr_t pmsu_mp_phys_base; |
102 | static void __iomem *pmsu_mp_base; | |
6509dc74 | 103 | |
752a9937 | 104 | static void *mvebu_cpu_resume; |
8c16babc | 105 | |
7444dad2 | 106 | static struct of_device_id of_pmsu_table[] = { |
0c3acc74 GC |
107 | { .compatible = "marvell,armada-370-pmsu", }, |
108 | { .compatible = "marvell,armada-370-xp-pmsu", }, | |
b4bca249 | 109 | { .compatible = "marvell,armada-380-pmsu", }, |
7444dad2 GC |
110 | { /* end of list */ }, |
111 | }; | |
112 | ||
05ad6906 | 113 | void mvebu_pmsu_set_cpu_boot_addr(int hw_cpu, void *boot_addr) |
02e7b067 GC |
114 | { |
115 | writel(virt_to_phys(boot_addr), pmsu_mp_base + | |
116 | PMSU_BOOT_ADDR_REDIRECT_OFFSET(hw_cpu)); | |
117 | } | |
118 | ||
3076cc58 GC |
119 | extern unsigned char mvebu_boot_wa_start; |
120 | extern unsigned char mvebu_boot_wa_end; | |
121 | ||
122 | /* | |
123 | * This function sets up the boot address workaround needed for SMP | |
124 | * boot on Armada 375 Z1 and cpuidle on Armada 370. It unmaps the | |
125 | * BootROM Mbus window, and instead remaps a crypto SRAM into which a | |
126 | * custom piece of code is copied to replace the problematic BootROM. | |
127 | */ | |
128 | int mvebu_setup_boot_addr_wa(unsigned int crypto_eng_target, | |
129 | unsigned int crypto_eng_attribute, | |
130 | phys_addr_t resume_addr_reg) | |
131 | { | |
132 | void __iomem *sram_virt_base; | |
133 | u32 code_len = &mvebu_boot_wa_end - &mvebu_boot_wa_start; | |
134 | ||
135 | mvebu_mbus_del_window(BOOTROM_BASE, BOOTROM_SIZE); | |
136 | mvebu_mbus_add_window_by_id(crypto_eng_target, crypto_eng_attribute, | |
137 | SRAM_PHYS_BASE, SZ_64K); | |
138 | ||
139 | sram_virt_base = ioremap(SRAM_PHYS_BASE, SZ_64K); | |
140 | if (!sram_virt_base) { | |
141 | pr_err("Unable to map SRAM to setup the boot address WA\n"); | |
142 | return -ENOMEM; | |
143 | } | |
144 | ||
145 | memcpy(sram_virt_base, &mvebu_boot_wa_start, code_len); | |
146 | ||
147 | /* | |
148 | * The last word of the code copied in SRAM must contain the | |
149 | * physical base address of the PMSU register. We | |
150 | * intentionally store this address in the native endianness | |
151 | * of the system. | |
152 | */ | |
153 | __raw_writel((unsigned long)resume_addr_reg, | |
154 | sram_virt_base + code_len - 4); | |
155 | ||
156 | iounmap(sram_virt_base); | |
157 | ||
158 | return 0; | |
159 | } | |
160 | ||
/*
 * Early initialization: locate the PMSU node in the Device Tree,
 * reserve its register window and ioremap it into pmsu_mp_base.
 * Returns 0 when no PMSU node is present (platform has no PMSU).
 */
static int __init mvebu_v7_pmsu_init(void)
{
	struct device_node *np;
	struct resource res;
	int ret = 0;

	np = of_find_matching_node(NULL, of_pmsu_table);
	if (!np)
		return 0;

	pr_info("Initializing Power Management Service Unit\n");

	if (of_address_to_resource(np, 0, &res)) {
		pr_err("unable to get resource\n");
		ret = -ENOENT;
		goto out;
	}

	if (of_device_is_compatible(np, "marvell,armada-370-xp-pmsu")) {
		/*
		 * The deprecated binding pointed PMSU_BASE_OFFSET into
		 * the unit; rebase to the start of the block and use
		 * the full register window size.
		 */
		pr_warn(FW_WARN "deprecated pmsu binding\n");
		res.start = res.start - PMSU_BASE_OFFSET;
		res.end = res.start + PMSU_REG_SIZE - 1;
	}

	if (!request_mem_region(res.start, resource_size(&res),
				np->full_name)) {
		pr_err("unable to request region\n");
		ret = -EBUSY;
		goto out;
	}

	/* Physical base is kept for the Armada 370 boot-address WA */
	pmsu_mp_phys_base = res.start;

	pmsu_mp_base = ioremap(res.start, resource_size(&res));
	if (!pmsu_mp_base) {
		pr_err("unable to map registers\n");
		release_mem_region(res.start, resource_size(&res));
		ret = -ENOMEM;
		goto out;
	}

out:
	of_node_put(np);
	return ret;
}
206 | ||
898ef3e9 | 207 | static void mvebu_v7_pmsu_enable_l2_powerdown_onidle(void) |
f713c7e7 GC |
208 | { |
209 | u32 reg; | |
210 | ||
211 | if (pmsu_mp_base == NULL) | |
212 | return; | |
213 | ||
214 | /* Enable L2 & Fabric powerdown in Deep-Idle mode - Fabric */ | |
215 | reg = readl(pmsu_mp_base + L2C_NFABRIC_PM_CTL); | |
216 | reg |= L2C_NFABRIC_PM_CTL_PWR_DOWN; | |
217 | writel(reg, pmsu_mp_base + L2C_NFABRIC_PM_CTL); | |
218 | } | |
219 | ||
5da964e0 GC |
/* Flags selecting how far mvebu_v7_pmsu_idle_prepare() powers down */
enum pmsu_idle_prepare_flags {
	PMSU_PREPARE_NORMAL = 0,
	PMSU_PREPARE_DEEP_IDLE = BIT(0),
	PMSU_PREPARE_SNOOP_DISABLE = BIT(1),
};

/* No locking is needed because we only access per-CPU registers */
static int mvebu_v7_pmsu_idle_prepare(unsigned long flags)
{
	unsigned int hw_cpu = cpu_logical_map(smp_processor_id());
	u32 reg;

	/* PMSU never mapped: cannot program the power-down sequence */
	if (pmsu_mp_base == NULL)
		return -EINVAL;

	/*
	 * Adjust the PMSU configuration to wait for WFI signal, enable
	 * IRQ and FIQ as wakeup events, set wait for snoop queue empty
	 * indication and mask IRQ and FIQ from CPU
	 */
	reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
	reg |= PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT    |
	       PMSU_STATUS_AND_MASK_IRQ_WAKEUP       |
	       PMSU_STATUS_AND_MASK_FIQ_WAKEUP       |
	       PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT |
	       PMSU_STATUS_AND_MASK_IRQ_MASK         |
	       PMSU_STATUS_AND_MASK_FIQ_MASK;
	writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));

	reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
	/* ask HW to power down the L2 Cache if needed */
	if (flags & PMSU_PREPARE_DEEP_IDLE)
		reg |= PMSU_CONTROL_AND_CONFIG_L2_PWDDN;

	/* request power down */
	reg |= PMSU_CONTROL_AND_CONFIG_PWDDN_REQ;
	writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));

	if (flags & PMSU_PREPARE_SNOOP_DISABLE) {
		/* Disable snoop disable by HW - SW is taking care of it */
		reg = readl(pmsu_mp_base + PMSU_CPU_POWER_DOWN_CONTROL(hw_cpu));
		reg |= PMSU_CPU_POWER_DOWN_DIS_SNP_Q_SKIP;
		writel(reg, pmsu_mp_base + PMSU_CPU_POWER_DOWN_CONTROL(hw_cpu));
	}

	return 0;
}
267 | ||
/*
 * Enter (deep) idle on the local CPU: program the PMSU, leave cache
 * coherency and execute WFI.  Execution only continues past wfi() if
 * the power down failed; in that case coherency is restored and 0 is
 * returned so the caller resumes normally.
 */
int armada_370_xp_pmsu_idle_enter(unsigned long deepidle)
{
	unsigned long flags = PMSU_PREPARE_SNOOP_DISABLE;
	int ret;

	if (deepidle)
		flags |= PMSU_PREPARE_DEEP_IDLE;

	ret = mvebu_v7_pmsu_idle_prepare(flags);
	if (ret)
		return ret;

	v7_exit_coherency_flush(all);

	ll_disable_coherency();

	dsb();

	wfi();

	/* If we are here, wfi failed. As processors run out of
	 * coherency for some time, tlbs might be stale, so flush them
	 */
	local_flush_tlb_all();

	ll_enable_coherency();

	/* Test the CR_C bit and set it if it was cleared */
	asm volatile(
	"mrc	p15, 0, r0, c1, c0, 0 \n\t"
	"tst	r0, #(1 << 2) \n\t"
	"orreq	r0, r0, #(1 << 2) \n\t"
	"mcreq	p15, 0, r0, c1, c0, 0 \n\t"
	"isb	"
	: : : "r0");

	pr_debug("Failed to suspend the system\n");

	return 0;
}
308 | ||
/* cpu_suspend() entry point for the Armada 370/XP cpuidle driver */
static int armada_370_xp_cpu_suspend(unsigned long deepidle)
{
	return cpu_suspend(deepidle, armada_370_xp_pmsu_idle_enter);
}
313 | ||
/*
 * Suspend finisher for Armada 38x: prepare the PMSU, flush and exit
 * coherency, power off via the SCU and idle.  Returns 1 when the CPU
 * falls through cpu_do_idle() without having been powered down.
 */
int armada_38x_do_cpu_suspend(unsigned long deepidle)
{
	unsigned long flags = 0;

	if (deepidle)
		flags |= PMSU_PREPARE_DEEP_IDLE;

	/* NOTE(review): return value ignored -- confirm failure here is benign */
	mvebu_v7_pmsu_idle_prepare(flags);
	/*
	 * Already flushed cache, but do it again as the outer cache
	 * functions dirty the cache with spinlocks
	 */
	v7_exit_coherency_flush(louis);

	scu_power_mode(mvebu_get_scu_base(), SCU_PM_POWEROFF);

	cpu_do_idle();

	return 1;
}
334 | ||
static int armada_38x_cpu_suspend(unsigned long deepidle)
{
	/*
	 * NOTE(review): the deepidle argument is not forwarded --
	 * cpu_suspend() is always entered with 'false', so the
	 * finisher never sets PMSU_PREPARE_DEEP_IDLE.  Presumably
	 * deliberate for Armada 38x; confirm before changing.
	 */
	return cpu_suspend(false, armada_38x_do_cpu_suspend);
}
339 | ||
c3e04cab | 340 | /* No locking is needed because we only access per-CPU registers */ |
898ef3e9 | 341 | void mvebu_v7_pmsu_idle_exit(void) |
c3e04cab GC |
342 | { |
343 | unsigned int hw_cpu = cpu_logical_map(smp_processor_id()); | |
344 | u32 reg; | |
345 | ||
346 | if (pmsu_mp_base == NULL) | |
347 | return; | |
c3e04cab GC |
348 | /* cancel ask HW to power down the L2 Cache if possible */ |
349 | reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu)); | |
350 | reg &= ~PMSU_CONTROL_AND_CONFIG_L2_PWDDN; | |
351 | writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu)); | |
352 | ||
353 | /* cancel Enable wakeup events and mask interrupts */ | |
354 | reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu)); | |
355 | reg &= ~(PMSU_STATUS_AND_MASK_IRQ_WAKEUP | PMSU_STATUS_AND_MASK_FIQ_WAKEUP); | |
356 | reg &= ~PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT; | |
357 | reg &= ~PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT; | |
358 | reg &= ~(PMSU_STATUS_AND_MASK_IRQ_MASK | PMSU_STATUS_AND_MASK_FIQ_MASK); | |
359 | writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu)); | |
360 | } | |
361 | ||
898ef3e9 | 362 | static int mvebu_v7_cpu_pm_notify(struct notifier_block *self, |
d163ee16 GC |
363 | unsigned long action, void *hcpu) |
364 | { | |
365 | if (action == CPU_PM_ENTER) { | |
366 | unsigned int hw_cpu = cpu_logical_map(smp_processor_id()); | |
752a9937 | 367 | mvebu_pmsu_set_cpu_boot_addr(hw_cpu, mvebu_cpu_resume); |
d163ee16 | 368 | } else if (action == CPU_PM_EXIT) { |
898ef3e9 | 369 | mvebu_v7_pmsu_idle_exit(); |
d163ee16 GC |
370 | } |
371 | ||
372 | return NOTIFY_OK; | |
373 | } | |
374 | ||
898ef3e9 GC |
/* Notifier block registered with cpu_pm_register_notifier() below */
static struct notifier_block mvebu_v7_cpu_pm_notifier = {
	.notifier_call = mvebu_v7_cpu_pm_notify,
};
378 | ||
3b9e4b14 GC |
379 | static struct platform_device mvebu_v7_cpuidle_device; |
380 | ||
381 | static __init int armada_370_cpuidle_init(void) | |
8c16babc GC |
382 | { |
383 | struct device_node *np; | |
3b9e4b14 GC |
384 | phys_addr_t redirect_reg; |
385 | ||
386 | np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric"); | |
387 | if (!np) | |
388 | return -ENODEV; | |
389 | of_node_put(np); | |
8c16babc GC |
390 | |
391 | /* | |
3b9e4b14 GC |
392 | * On Armada 370, there is "a slow exit process from the deep |
393 | * idle state due to heavy L1/L2 cache cleanup operations | |
394 | * performed by the BootROM software". To avoid this, we | |
395 | * replace the restart code of the bootrom by a a simple jump | |
396 | * to the boot address. Then the code located at this boot | |
397 | * address will take care of the initialization. | |
8c16babc | 398 | */ |
3b9e4b14 GC |
399 | redirect_reg = pmsu_mp_phys_base + PMSU_BOOT_ADDR_REDIRECT_OFFSET(0); |
400 | mvebu_setup_boot_addr_wa(ARMADA_370_CRYPT0_ENG_TARGET, | |
401 | ARMADA_370_CRYPT0_ENG_ATTR, | |
402 | redirect_reg); | |
8c16babc | 403 | |
3b9e4b14 GC |
404 | mvebu_cpu_resume = armada_370_xp_cpu_resume; |
405 | mvebu_v7_cpuidle_device.dev.platform_data = armada_370_xp_cpu_suspend; | |
406 | mvebu_v7_cpuidle_device.name = "cpuidle-armada-370"; | |
407 | ||
408 | return 0; | |
409 | } | |
410 | ||
e53b1fd4 GC |
411 | static __init int armada_38x_cpuidle_init(void) |
412 | { | |
413 | struct device_node *np; | |
414 | void __iomem *mpsoc_base; | |
415 | u32 reg; | |
416 | ||
417 | np = of_find_compatible_node(NULL, NULL, | |
418 | "marvell,armada-380-coherency-fabric"); | |
419 | if (!np) | |
420 | return -ENODEV; | |
421 | of_node_put(np); | |
422 | ||
423 | np = of_find_compatible_node(NULL, NULL, | |
424 | "marvell,armada-380-mpcore-soc-ctrl"); | |
425 | if (!np) | |
426 | return -ENODEV; | |
427 | mpsoc_base = of_iomap(np, 0); | |
428 | BUG_ON(!mpsoc_base); | |
429 | of_node_put(np); | |
430 | ||
431 | /* Set up reset mask when powering down the cpus */ | |
432 | reg = readl(mpsoc_base + MPCORE_RESET_CTL); | |
433 | reg |= MPCORE_RESET_CTL_L2; | |
434 | reg |= MPCORE_RESET_CTL_DEBUG; | |
435 | writel(reg, mpsoc_base + MPCORE_RESET_CTL); | |
436 | iounmap(mpsoc_base); | |
437 | ||
438 | /* Set up delay */ | |
439 | reg = readl(pmsu_mp_base + PMSU_POWERDOWN_DELAY); | |
440 | reg &= ~PMSU_POWERDOWN_DELAY_MASK; | |
441 | reg |= PMSU_DFLT_ARMADA38X_DELAY; | |
442 | reg |= PMSU_POWERDOWN_DELAY_PMU; | |
443 | writel(reg, pmsu_mp_base + PMSU_POWERDOWN_DELAY); | |
444 | ||
445 | mvebu_cpu_resume = armada_38x_cpu_resume; | |
446 | mvebu_v7_cpuidle_device.dev.platform_data = armada_38x_cpu_suspend; | |
447 | mvebu_v7_cpuidle_device.name = "cpuidle-armada-38x"; | |
448 | ||
449 | return 0; | |
450 | } | |
451 | ||
/* Armada XP cpuidle setup: configure the shared cpuidle device */
static __init int armada_xp_cpuidle_init(void)
{
	struct device_node *np;

	/* Bail out when the DT exposes no coherency fabric node */
	np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric");
	if (!np)
		return -ENODEV;
	of_node_put(np);

	mvebu_cpu_resume = armada_370_xp_cpu_resume;
	mvebu_v7_cpuidle_device.dev.platform_data = armada_370_xp_cpu_suspend;
	mvebu_v7_cpuidle_device.name = "cpuidle-armada-xp";

	return 0;
}
467 | ||
/*
 * Pick the SoC-specific cpuidle setup, then register the cpuidle
 * platform device and the CPU PM notifier.  Machines without a PMSU
 * node, or of an unknown type, are silently skipped (return 0).
 */
static int __init mvebu_v7_cpu_pm_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, of_pmsu_table);
	if (!np)
		return 0;
	of_node_put(np);

	if (of_machine_is_compatible("marvell,armadaxp"))
		ret = armada_xp_cpuidle_init();
	else if (of_machine_is_compatible("marvell,armada370"))
		ret = armada_370_cpuidle_init();
	else if (of_machine_is_compatible("marvell,armada380"))
		ret = armada_38x_cpuidle_init();
	else
		return 0;

	if (ret)
		return ret;

	mvebu_v7_pmsu_enable_l2_powerdown_onidle();
	platform_device_register(&mvebu_v7_cpuidle_device);
	cpu_pm_register_notifier(&mvebu_v7_cpu_pm_notifier);

	return 0;
}
496 | ||
898ef3e9 GC |
arch_initcall(mvebu_v7_cpu_pm_init);
/* PMSU mapping must happen first: early_initcall runs before arch_initcall */
early_initcall(mvebu_v7_pmsu_init);
a509ea84 TP |
499 | |
/*
 * Per-CPU side of a DFS request, run on the target CPU via
 * smp_call_function_single().  Programs the PMSU so that entering
 * WFI triggers the frequency transition, idles, then clears the
 * idle-wait indication once back from idle.
 *
 * NOTE(review): uses smp_processor_id() directly as the register
 * index, while the idle paths use cpu_logical_map() -- confirm the
 * logical-to-physical CPU mapping is the identity here.
 */
static void mvebu_pmsu_dfs_request_local(void *data)
{
	u32 reg;
	u32 cpu = smp_processor_id();
	unsigned long flags;

	/* Keep interrupts off so nothing runs between setup and wfi() */
	local_irq_save(flags);

	/* Prepare to enter idle */
	reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
	reg |= PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT |
	       PMSU_STATUS_AND_MASK_IRQ_MASK     |
	       PMSU_STATUS_AND_MASK_FIQ_MASK;
	writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));

	/* Request the DFS transition */
	reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(cpu));
	reg |= PMSU_CONTROL_AND_CONFIG_DFS_REQ;
	writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(cpu));

	/* The fact of entering idle will trigger the DFS transition */
	wfi();

	/*
	 * We're back from idle, the DFS transition has completed,
	 * clear the idle wait indication.
	 */
	reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
	reg &= ~PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT;
	writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));

	local_irq_restore(flags);
}
533 | ||
534 | int mvebu_pmsu_dfs_request(int cpu) | |
535 | { | |
536 | unsigned long timeout; | |
537 | int hwcpu = cpu_logical_map(cpu); | |
538 | u32 reg; | |
539 | ||
540 | /* Clear any previous DFS DONE event */ | |
541 | reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu)); | |
542 | reg &= ~PMSU_EVENT_STATUS_AND_MASK_DFS_DONE; | |
543 | writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu)); | |
544 | ||
545 | /* Mask the DFS done interrupt, since we are going to poll */ | |
546 | reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu)); | |
547 | reg |= PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK; | |
548 | writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu)); | |
549 | ||
550 | /* Trigger the DFS on the appropriate CPU */ | |
551 | smp_call_function_single(cpu, mvebu_pmsu_dfs_request_local, | |
552 | NULL, false); | |
553 | ||
554 | /* Poll until the DFS done event is generated */ | |
555 | timeout = jiffies + HZ; | |
556 | while (time_before(jiffies, timeout)) { | |
557 | reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu)); | |
558 | if (reg & PMSU_EVENT_STATUS_AND_MASK_DFS_DONE) | |
559 | break; | |
560 | udelay(10); | |
561 | } | |
562 | ||
563 | if (time_after(jiffies, timeout)) | |
564 | return -ETIME; | |
565 | ||
566 | /* Restore the DFS mask to its original state */ | |
567 | reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu)); | |
568 | reg &= ~PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK; | |
569 | writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu)); | |
570 | ||
571 | return 0; | |
572 | } | |
573 | ||
574 | static int __init armada_xp_pmsu_cpufreq_init(void) | |
575 | { | |
576 | struct device_node *np; | |
577 | struct resource res; | |
578 | int ret, cpu; | |
579 | ||
580 | if (!of_machine_is_compatible("marvell,armadaxp")) | |
581 | return 0; | |
582 | ||
583 | /* | |
584 | * In order to have proper cpufreq handling, we need to ensure | |
585 | * that the Device Tree description of the CPU clock includes | |
586 | * the definition of the PMU DFS registers. If not, we do not | |
587 | * register the clock notifier and the cpufreq driver. This | |
588 | * piece of code is only for compatibility with old Device | |
589 | * Trees. | |
590 | */ | |
591 | np = of_find_compatible_node(NULL, NULL, "marvell,armada-xp-cpu-clock"); | |
592 | if (!np) | |
593 | return 0; | |
594 | ||
595 | ret = of_address_to_resource(np, 1, &res); | |
596 | if (ret) { | |
597 | pr_warn(FW_WARN "not enabling cpufreq, deprecated armada-xp-cpu-clock binding\n"); | |
598 | of_node_put(np); | |
599 | return 0; | |
600 | } | |
601 | ||
602 | of_node_put(np); | |
603 | ||
604 | /* | |
605 | * For each CPU, this loop registers the operating points | |
606 | * supported (which are the nominal CPU frequency and half of | |
607 | * it), and registers the clock notifier that will take care | |
608 | * of doing the PMSU part of a frequency transition. | |
609 | */ | |
610 | for_each_possible_cpu(cpu) { | |
611 | struct device *cpu_dev; | |
612 | struct clk *clk; | |
613 | int ret; | |
614 | ||
615 | cpu_dev = get_cpu_device(cpu); | |
616 | if (!cpu_dev) { | |
617 | pr_err("Cannot get CPU %d\n", cpu); | |
618 | continue; | |
619 | } | |
620 | ||
621 | clk = clk_get(cpu_dev, 0); | |
b03e119f | 622 | if (IS_ERR(clk)) { |
a509ea84 | 623 | pr_err("Cannot get clock for CPU %d\n", cpu); |
b03e119f | 624 | return PTR_ERR(clk); |
a509ea84 TP |
625 | } |
626 | ||
627 | /* | |
628 | * In case of a failure of dev_pm_opp_add(), we don't | |
629 | * bother with cleaning up the registered OPP (there's | |
630 | * no function to do so), and simply cancel the | |
631 | * registration of the cpufreq device. | |
632 | */ | |
633 | ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk), 0); | |
634 | if (ret) { | |
635 | clk_put(clk); | |
636 | return ret; | |
637 | } | |
638 | ||
639 | ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk) / 2, 0); | |
640 | if (ret) { | |
641 | clk_put(clk); | |
642 | return ret; | |
643 | } | |
644 | } | |
645 | ||
bbcf0719 | 646 | platform_device_register_simple("cpufreq-dt", -1, NULL, 0); |
a509ea84 TP |
647 | return 0; |
648 | } | |
649 | ||
650 | device_initcall(armada_xp_pmsu_cpufreq_init); |