9190ae8626cf8f21d44b56c24773514020151da0
[deliverable/linux.git] / arch / arm / mach-mvebu / pmsu.c
1 /*
2 * Power Management Service Unit(PMSU) support for Armada 370/XP platforms.
3 *
4 * Copyright (C) 2012 Marvell
5 *
6 * Yehuda Yitschak <yehuday@marvell.com>
7 * Gregory Clement <gregory.clement@free-electrons.com>
8 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
9 *
10 * This file is licensed under the terms of the GNU General Public
11 * License version 2. This program is licensed "as is" without any
12 * warranty of any kind, whether express or implied.
13 *
14 * The Armada 370 and Armada XP SOCs have a power management service
15 * unit which is responsible for powering down and waking up CPUs and
16 * other SOC units
17 */
18
19 #define pr_fmt(fmt) "mvebu-pmsu: " fmt
20
21 #include <linux/cpu_pm.h>
22 #include <linux/init.h>
23 #include <linux/io.h>
24 #include <linux/kernel.h>
25 #include <linux/mbus.h>
26 #include <linux/of_address.h>
27 #include <linux/platform_device.h>
28 #include <linux/resource.h>
29 #include <linux/smp.h>
30 #include <asm/cacheflush.h>
31 #include <asm/cp15.h>
32 #include <asm/smp_plat.h>
33 #include <asm/suspend.h>
34 #include <asm/tlbflush.h>
35 #include "common.h"
36
37
#define PMSU_BASE_OFFSET    0x100
#define PMSU_REG_SIZE	    0x1000

/*
 * PMSU MP registers: one 0x100-byte register bank per CPU.  The macro
 * arguments are parenthesized so that expressions (e.g. "cpu + 1")
 * evaluate correctly.
 */
#define PMSU_CONTROL_AND_CONFIG(cpu)	    (((cpu) * 0x100) + 0x104)
#define PMSU_CONTROL_AND_CONFIG_DFS_REQ		BIT(18)
#define PMSU_CONTROL_AND_CONFIG_PWDDN_REQ	BIT(16)
#define PMSU_CONTROL_AND_CONFIG_L2_PWDDN	BIT(20)

#define PMSU_CPU_POWER_DOWN_CONTROL(cpu)    (((cpu) * 0x100) + 0x108)

#define PMSU_CPU_POWER_DOWN_DIS_SNP_Q_SKIP	BIT(0)

#define PMSU_STATUS_AND_MASK(cpu)	    (((cpu) * 0x100) + 0x10c)
#define PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT	BIT(16)
#define PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT	BIT(17)
#define PMSU_STATUS_AND_MASK_IRQ_WAKEUP		BIT(20)
#define PMSU_STATUS_AND_MASK_FIQ_WAKEUP		BIT(21)
#define PMSU_STATUS_AND_MASK_DBG_WAKEUP		BIT(22)
#define PMSU_STATUS_AND_MASK_IRQ_MASK		BIT(24)
#define PMSU_STATUS_AND_MASK_FIQ_MASK		BIT(25)

#define PMSU_BOOT_ADDR_REDIRECT_OFFSET(cpu) (((cpu) * 0x100) + 0x124)

/* PMSU fabric registers */
#define L2C_NFABRIC_PM_CTL		    0x4
#define L2C_NFABRIC_PM_CTL_PWR_DOWN		BIT(20)

/* Crypto SRAM window that replaces the BootROM for the boot address WA */
#define SRAM_PHYS_BASE  0xFFFF0000
#define BOOTROM_BASE    0xFFF00000
#define BOOTROM_SIZE    0x100000

/* Mbus target/attribute of the Armada 370 crypto engine SRAM */
#define ARMADA_370_CRYPT0_ENG_TARGET   0x9
#define ARMADA_370_CRYPT0_ENG_ATTR     0x1
72
/* Low-level coherency on/off helpers for this CPU, defined outside this file */
extern void ll_disable_coherency(void);
extern void ll_enable_coherency(void);

/* Entry point a CPU jumps to when it is woken back up, defined elsewhere */
extern void armada_370_xp_cpu_resume(void);
/* Physical and virtual base of the PMSU register block, set at init */
static phys_addr_t pmsu_mp_phys_base;
static void __iomem *pmsu_mp_base;

/* Resume handler programmed into the PMSU boot address redirect register */
static void *mvebu_cpu_resume;
81
82 static struct of_device_id of_pmsu_table[] = {
83 { .compatible = "marvell,armada-370-pmsu", },
84 { .compatible = "marvell,armada-370-xp-pmsu", },
85 { .compatible = "marvell,armada-380-pmsu", },
86 { /* end of list */ },
87 };
88
89 void mvebu_pmsu_set_cpu_boot_addr(int hw_cpu, void *boot_addr)
90 {
91 writel(virt_to_phys(boot_addr), pmsu_mp_base +
92 PMSU_BOOT_ADDR_REDIRECT_OFFSET(hw_cpu));
93 }
94
95 extern unsigned char mvebu_boot_wa_start;
96 extern unsigned char mvebu_boot_wa_end;
97
98 /*
99 * This function sets up the boot address workaround needed for SMP
100 * boot on Armada 375 Z1 and cpuidle on Armada 370. It unmaps the
101 * BootROM Mbus window, and instead remaps a crypto SRAM into which a
102 * custom piece of code is copied to replace the problematic BootROM.
103 */
104 int mvebu_setup_boot_addr_wa(unsigned int crypto_eng_target,
105 unsigned int crypto_eng_attribute,
106 phys_addr_t resume_addr_reg)
107 {
108 void __iomem *sram_virt_base;
109 u32 code_len = &mvebu_boot_wa_end - &mvebu_boot_wa_start;
110
111 mvebu_mbus_del_window(BOOTROM_BASE, BOOTROM_SIZE);
112 mvebu_mbus_add_window_by_id(crypto_eng_target, crypto_eng_attribute,
113 SRAM_PHYS_BASE, SZ_64K);
114
115 sram_virt_base = ioremap(SRAM_PHYS_BASE, SZ_64K);
116 if (!sram_virt_base) {
117 pr_err("Unable to map SRAM to setup the boot address WA\n");
118 return -ENOMEM;
119 }
120
121 memcpy(sram_virt_base, &mvebu_boot_wa_start, code_len);
122
123 /*
124 * The last word of the code copied in SRAM must contain the
125 * physical base address of the PMSU register. We
126 * intentionally store this address in the native endianness
127 * of the system.
128 */
129 __raw_writel((unsigned long)resume_addr_reg,
130 sram_virt_base + code_len - 4);
131
132 iounmap(sram_virt_base);
133
134 return 0;
135 }
136
137 static int __init mvebu_v7_pmsu_init(void)
138 {
139 struct device_node *np;
140 struct resource res;
141 int ret = 0;
142
143 np = of_find_matching_node(NULL, of_pmsu_table);
144 if (!np)
145 return 0;
146
147 pr_info("Initializing Power Management Service Unit\n");
148
149 if (of_address_to_resource(np, 0, &res)) {
150 pr_err("unable to get resource\n");
151 ret = -ENOENT;
152 goto out;
153 }
154
155 if (of_device_is_compatible(np, "marvell,armada-370-xp-pmsu")) {
156 pr_warn(FW_WARN "deprecated pmsu binding\n");
157 res.start = res.start - PMSU_BASE_OFFSET;
158 res.end = res.start + PMSU_REG_SIZE - 1;
159 }
160
161 if (!request_mem_region(res.start, resource_size(&res),
162 np->full_name)) {
163 pr_err("unable to request region\n");
164 ret = -EBUSY;
165 goto out;
166 }
167
168 pmsu_mp_phys_base = res.start;
169
170 pmsu_mp_base = ioremap(res.start, resource_size(&res));
171 if (!pmsu_mp_base) {
172 pr_err("unable to map registers\n");
173 release_mem_region(res.start, resource_size(&res));
174 ret = -ENOMEM;
175 goto out;
176 }
177
178 out:
179 of_node_put(np);
180 return ret;
181 }
182
183 static void mvebu_v7_pmsu_enable_l2_powerdown_onidle(void)
184 {
185 u32 reg;
186
187 if (pmsu_mp_base == NULL)
188 return;
189
190 /* Enable L2 & Fabric powerdown in Deep-Idle mode - Fabric */
191 reg = readl(pmsu_mp_base + L2C_NFABRIC_PM_CTL);
192 reg |= L2C_NFABRIC_PM_CTL_PWR_DOWN;
193 writel(reg, pmsu_mp_base + L2C_NFABRIC_PM_CTL);
194 }
195
/* Flags for mvebu_v7_pmsu_idle_prepare() selecting how deep the idle is */
enum pmsu_idle_prepare_flags {
	PMSU_PREPARE_NORMAL = 0,		/* plain CPU power down */
	PMSU_PREPARE_DEEP_IDLE = BIT(0),	/* also power down the L2 cache */
	PMSU_PREPARE_SNOOP_DISABLE = BIT(1),	/* SW handles snoop disable, not HW */
};
201
202 /* No locking is needed because we only access per-CPU registers */
203 static int mvebu_v7_pmsu_idle_prepare(unsigned long flags)
204 {
205 unsigned int hw_cpu = cpu_logical_map(smp_processor_id());
206 u32 reg;
207
208 if (pmsu_mp_base == NULL)
209 return -EINVAL;
210
211 /*
212 * Adjust the PMSU configuration to wait for WFI signal, enable
213 * IRQ and FIQ as wakeup events, set wait for snoop queue empty
214 * indication and mask IRQ and FIQ from CPU
215 */
216 reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
217 reg |= PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT |
218 PMSU_STATUS_AND_MASK_IRQ_WAKEUP |
219 PMSU_STATUS_AND_MASK_FIQ_WAKEUP |
220 PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT |
221 PMSU_STATUS_AND_MASK_IRQ_MASK |
222 PMSU_STATUS_AND_MASK_FIQ_MASK;
223 writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
224
225 reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
226 /* ask HW to power down the L2 Cache if needed */
227 if (flags & PMSU_PREPARE_DEEP_IDLE)
228 reg |= PMSU_CONTROL_AND_CONFIG_L2_PWDDN;
229
230 /* request power down */
231 reg |= PMSU_CONTROL_AND_CONFIG_PWDDN_REQ;
232 writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
233
234 if (flags & PMSU_PREPARE_SNOOP_DISABLE) {
235 /* Disable snoop disable by HW - SW is taking care of it */
236 reg = readl(pmsu_mp_base + PMSU_CPU_POWER_DOWN_CONTROL(hw_cpu));
237 reg |= PMSU_CPU_POWER_DOWN_DIS_SNP_Q_SKIP;
238 writel(reg, pmsu_mp_base + PMSU_CPU_POWER_DOWN_CONTROL(hw_cpu));
239 }
240
241 return 0;
242 }
243
/*
 * Enter (deep) idle on the calling CPU through the PMSU: program the
 * PMSU, flush the caches and leave SMP coherency, then execute WFI.
 * If execution reaches past wfi(), the power down did not happen, so
 * coherency and the data cache enable bit are restored before
 * returning.  Returns 0, or a negative errno if the PMSU is not
 * mapped.
 */
int armada_370_xp_pmsu_idle_enter(unsigned long deepidle)
{
	unsigned long flags = PMSU_PREPARE_SNOOP_DISABLE;
	int ret;

	if (deepidle)
		flags |= PMSU_PREPARE_DEEP_IDLE;

	ret = mvebu_v7_pmsu_idle_prepare(flags);
	if (ret)
		return ret;

	/* Flush caches and take this CPU out of SMP coherency */
	v7_exit_coherency_flush(all);

	ll_disable_coherency();

	dsb();

	wfi();

	/* If we are here, wfi failed. As processors run out of
	 * coherency for some time, tlbs might be stale, so flush them
	 */
	local_flush_tlb_all();

	ll_enable_coherency();

	/* Test the CR_C bit and set it if it was cleared */
	asm volatile(
	"mrc p15, 0, r0, c1, c0, 0 \n\t"
	"tst r0, #(1 << 2) \n\t"
	"orreq r0, r0, #(1 << 2) \n\t"
	"mcreq p15, 0, r0, c1, c0, 0 \n\t"
	"isb "
	: : : "r0");

	pr_debug("Failed to suspend the system\n");

	return 0;
}
284
/*
 * cpuidle entry point (installed as the cpuidle device platform
 * data): wraps armada_370_xp_pmsu_idle_enter() in cpu_suspend() so
 * the CPU state is saved/restored around the power down.
 */
static int armada_370_xp_cpu_suspend(unsigned long deepidle)
{
	return cpu_suspend(deepidle, armada_370_xp_pmsu_idle_enter);
}
289
290 /* No locking is needed because we only access per-CPU registers */
291 void mvebu_v7_pmsu_idle_exit(void)
292 {
293 unsigned int hw_cpu = cpu_logical_map(smp_processor_id());
294 u32 reg;
295
296 if (pmsu_mp_base == NULL)
297 return;
298
299 /* cancel ask HW to power down the L2 Cache if possible */
300 reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
301 reg &= ~PMSU_CONTROL_AND_CONFIG_L2_PWDDN;
302 writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
303
304 /* cancel Enable wakeup events and mask interrupts */
305 reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
306 reg &= ~(PMSU_STATUS_AND_MASK_IRQ_WAKEUP | PMSU_STATUS_AND_MASK_FIQ_WAKEUP);
307 reg &= ~PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT;
308 reg &= ~PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT;
309 reg &= ~(PMSU_STATUS_AND_MASK_IRQ_MASK | PMSU_STATUS_AND_MASK_FIQ_MASK);
310 writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
311 }
312
313 static int mvebu_v7_cpu_pm_notify(struct notifier_block *self,
314 unsigned long action, void *hcpu)
315 {
316 if (action == CPU_PM_ENTER) {
317 unsigned int hw_cpu = cpu_logical_map(smp_processor_id());
318 mvebu_pmsu_set_cpu_boot_addr(hw_cpu, mvebu_cpu_resume);
319 } else if (action == CPU_PM_EXIT) {
320 mvebu_v7_pmsu_idle_exit();
321 }
322
323 return NOTIFY_OK;
324 }
325
/* CPU PM notifier registered by mvebu_v7_cpu_pm_init() */
static struct notifier_block mvebu_v7_cpu_pm_notifier = {
	.notifier_call = mvebu_v7_cpu_pm_notify,
};

/* cpuidle device; name/platform_data are filled in per-SoC before register */
static struct platform_device mvebu_v7_cpuidle_device;
331
332 static __init int armada_370_cpuidle_init(void)
333 {
334 struct device_node *np;
335 phys_addr_t redirect_reg;
336
337 np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric");
338 if (!np)
339 return -ENODEV;
340 of_node_put(np);
341
342 /*
343 * On Armada 370, there is "a slow exit process from the deep
344 * idle state due to heavy L1/L2 cache cleanup operations
345 * performed by the BootROM software". To avoid this, we
346 * replace the restart code of the bootrom by a a simple jump
347 * to the boot address. Then the code located at this boot
348 * address will take care of the initialization.
349 */
350 redirect_reg = pmsu_mp_phys_base + PMSU_BOOT_ADDR_REDIRECT_OFFSET(0);
351 mvebu_setup_boot_addr_wa(ARMADA_370_CRYPT0_ENG_TARGET,
352 ARMADA_370_CRYPT0_ENG_ATTR,
353 redirect_reg);
354
355 mvebu_cpu_resume = armada_370_xp_cpu_resume;
356 mvebu_v7_cpuidle_device.dev.platform_data = armada_370_xp_cpu_suspend;
357 mvebu_v7_cpuidle_device.name = "cpuidle-armada-370";
358
359 return 0;
360 }
361
362 static __init int armada_xp_cpuidle_init(void)
363 {
364 struct device_node *np;
365
366 np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric");
367 if (!np)
368 return -ENODEV;
369 of_node_put(np);
370
371 mvebu_cpu_resume = armada_370_xp_cpu_resume;
372 mvebu_v7_cpuidle_device.dev.platform_data = armada_370_xp_cpu_suspend;
373 mvebu_v7_cpuidle_device.name = "cpuidle-armada-xp";
374
375 return 0;
376 }
377
378 static int __init mvebu_v7_cpu_pm_init(void)
379 {
380 struct device_node *np;
381 int ret;
382
383 np = of_find_matching_node(NULL, of_pmsu_table);
384 if (!np)
385 return 0;
386 of_node_put(np);
387
388 if (of_machine_is_compatible("marvell,armadaxp"))
389 ret = armada_xp_cpuidle_init();
390 else if (of_machine_is_compatible("marvell,armada370"))
391 ret = armada_370_cpuidle_init();
392 else
393 return 0;
394
395 if (ret)
396 return ret;
397
398 mvebu_v7_pmsu_enable_l2_powerdown_onidle();
399 platform_device_register(&mvebu_v7_cpuidle_device);
400 cpu_pm_register_notifier(&mvebu_v7_cpu_pm_notifier);
401
402 return 0;
403 }
404
/*
 * mvebu_v7_pmsu_init() maps the PMSU at early_initcall time, before
 * mvebu_v7_cpu_pm_init() (arch_initcall) makes use of it.
 */
arch_initcall(mvebu_v7_cpu_pm_init);
early_initcall(mvebu_v7_pmsu_init);
This page took 0.045678 seconds and 5 git commands to generate.