/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Cloned from linux/arch/arm/mach-vexpress/platsmp.c
 *
 *  Copyright (C) 2002 ARM Ltd.
 *  All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/smp_plat.h>
#include <asm/smp_scu.h>
#include <asm/firmware.h>

#include <mach/map.h>

#include "common.h"
#include "regs-pmu.h"

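/* Secondary CPU entry point, implemented in arch/arm/mach-exynos/headsmp.S. */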
extern void exynos4_secondary_startup(void);

/*
 * Set or clear the USE_DELAYED_RESET_ASSERTION option. It is used on Exynos4
 * SoCs while hot-(un)plugging CPUx.
 *
 * The option can be cleared safely during the first boot of a secondary CPU.
 *
 * Exynos4 SoCs require USE_DELAYED_RESET_ASSERTION to be set while powering
 * down a CPU so that the CPU idle clock-down logic can properly detect the
 * global idle state when CPUx is off.
 */
static void exynos_set_delayed_reset_assertion(u32 core_id, bool enable)
{
	if (soc_is_exynos4()) {
		unsigned int tmp;

		tmp = pmu_raw_readl(EXYNOS_ARM_CORE_OPTION(core_id));
		if (enable)
			tmp |= S5P_USE_DELAYED_RESET_ASSERTION;
		else
			tmp &= ~(S5P_USE_DELAYED_RESET_ASSERTION);
		pmu_raw_writel(tmp, EXYNOS_ARM_CORE_OPTION(core_id));
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static inline void cpu_leave_lowpower(u32 core_id)
{
	unsigned int v;

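	/*
	 * Re-enable the data cache (SCTLR.C) and rejoin SMP coherency by
	 * setting bit 6 (0x40) of the auxiliary control register, the SMP
	 * bit on the Cortex-A9/A15/A7 cores used in these SoCs.
	 */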
	asm volatile(
	"mrc	p15, 0, %0, c1, c0, 0\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	orr	%0, %0, %2\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	  : "=&r" (v)
	  : "Ir" (CR_C), "Ir" (0x40)
	  : "cc");

	exynos_set_delayed_reset_assertion(core_id, false);
}

static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
{
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	for (;;) {

		/* Turn the CPU off on next WFI instruction. */
		exynos_cpu_power_down(core_id);

		/*
		 * Exynos4 SoCs require setting
		 * USE_DELAYED_RESET_ASSERTION so that the CPU idle
		 * clock-down logic can properly detect the
		 * global idle state when CPUx is off.
		 */
		exynos_set_delayed_reset_assertion(core_id, true);

		wfi();

		if (pen_release == core_id) {
			/*
			 * OK, proper wakeup, we're done
			 */
			break;
		}

		/*
		 * Getting here means that we have come out of WFI without
		 * having been woken up - this shouldn't happen.
		 *
		 * Just note it happening - when we're woken, we can report
		 * its occurrence.
		 */
		(*spurious)++;
	}
}
#endif /* CONFIG_HOTPLUG_CPU */

/**
 * exynos_cpu_power_down : power down the specified cpu
 * @cpu : the cpu to power down
 *
 * Power down the specified cpu. The sequence must be finished by a
 * call to cpu_do_idle()
 *
 */
void exynos_cpu_power_down(int cpu)
{
	u32 core_conf;

	if (cpu == 0 && (soc_is_exynos5420() || soc_is_exynos5800())) {
		/*
		 * Bypass power down for CPU0 during suspend. Check for
		 * the SYS_PWR_REG value to decide if we are suspending
		 * the system.
		 */
		int val = pmu_raw_readl(EXYNOS5_ARM_CORE0_SYS_PWR_REG);

		if (!(val & S5P_CORE_LOCAL_PWR_EN))
			return;
	}

	core_conf = pmu_raw_readl(EXYNOS_ARM_CORE_CONFIGURATION(cpu));
	core_conf &= ~S5P_CORE_LOCAL_PWR_EN;
	pmu_raw_writel(core_conf, EXYNOS_ARM_CORE_CONFIGURATION(cpu));
}

/**
 * exynos_cpu_power_up : power up the specified cpu
 * @cpu : the cpu to power up
 *
 * Power up the specified cpu
 */
void exynos_cpu_power_up(int cpu)
{
	u32 core_conf = S5P_CORE_LOCAL_PWR_EN;

	if (soc_is_exynos3250())
		core_conf |= S5P_CORE_AUTOWAKEUP_EN;

	pmu_raw_writel(core_conf,
			EXYNOS_ARM_CORE_CONFIGURATION(cpu));
}

/**
 * exynos_cpu_power_state : returns the power state of the cpu
 * @cpu : the cpu to retrieve the power state from
 *
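 * Returns non-zero if the CPU's local power is enabled (S5P_CORE_LOCAL_PWR_EN
 * set in its status register), zero otherwise.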
 */
int exynos_cpu_power_state(int cpu)
{
	return (pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(cpu)) &
		S5P_CORE_LOCAL_PWR_EN);
}

/**
 * exynos_cluster_power_down : power down the specified cluster
 * @cluster : the cluster to power down
 */
void exynos_cluster_power_down(int cluster)
{
	pmu_raw_writel(0, EXYNOS_COMMON_CONFIGURATION(cluster));
}

/**
 * exynos_cluster_power_up : power up the specified cluster
 * @cluster : the cluster to power up
 */
void exynos_cluster_power_up(int cluster)
{
	pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN,
			EXYNOS_COMMON_CONFIGURATION(cluster));
}

/**
 * exynos_cluster_power_state : returns the power state of the cluster
 * @cluster : the cluster to retrieve the power state from
 *
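 * Returns non-zero if the cluster's local power is enabled, zero otherwise.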
 */
int exynos_cluster_power_state(int cluster)
{
	return (pmu_raw_readl(EXYNOS_COMMON_STATUS(cluster)) &
		S5P_CORE_LOCAL_PWR_EN);
}

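/*
 * Base of the register holding the secondary CPU boot address: the PMU
 * INFORM5 register on Exynos4210 rev 1.1, sysram on all other SoCs.
 */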
void __iomem *cpu_boot_reg_base(void)
{
	if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_1_1)
		return pmu_base_addr + S5P_INFORM5;
	return sysram_base_addr;
}

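/*
 * Per-CPU boot register: Exynos4412 has one word per CPU (4-byte stride),
 * Exynos5420/5800 use a single register one word past the base, and the
 * remaining SoCs use the base register itself.
 */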
static inline void __iomem *cpu_boot_reg(int cpu)
{
	void __iomem *boot_reg;

	boot_reg = cpu_boot_reg_base();
	if (!boot_reg)
		return ERR_PTR(-ENODEV);
	if (soc_is_exynos4412())
		boot_reg += 4*cpu;
	else if (soc_is_exynos5420() || soc_is_exynos5800())
		boot_reg += 4;
	return boot_reg;
}

/*
 * Set the wakeup-from-local-power-mode flag and execute a software reset for
 * the given core.
 *
 * Currently this is needed only when booting a secondary CPU on Exynos3250.
 */
static void exynos_core_restart(u32 core_id)
{
	u32 val;

	if (!of_machine_is_compatible("samsung,exynos3250"))
		return;

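	/*
	 * PMU_SPARE2 is polled here as a ready flag (assumption based on
	 * this loop: it is presumably set elsewhere once the core may be
	 * reset); wait for it to become non-zero before issuing the reset.
	 */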
	while (!pmu_raw_readl(S5P_PMU_SPARE2))
		udelay(10);
	udelay(10);

	val = pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(core_id));
	val |= S5P_CORE_WAKEUP_FROM_LOCAL_CFG;
	pmu_raw_writel(val, EXYNOS_ARM_CORE_STATUS(core_id));

	pr_info("CPU%u: Software reset\n", core_id);
	pmu_raw_writel(EXYNOS_CORE_PO_RESET(core_id), EXYNOS_SWRESET);
}

/*
 * Write pen_release in a way that is guaranteed to be visible to all
 * observers, irrespective of whether they're taking part in coherency
 * or not. This is necessary for the hotplug code to work reliably.
 */
static void write_pen_release(int val)
{
	pen_release = val;
	smp_wmb();
	sync_cache_w(&pen_release);
}

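/* Virtual address of the Snoop Control Unit (Cortex-A9 based SoCs only). */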
static void __iomem *scu_base_addr(void)
{
	return (void __iomem *)(S5P_VA_SCU);
}

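/*
 * Serializes the pen_release handshake between the boot CPU
 * (exynos_boot_secondary) and the incoming secondary (exynos_secondary_init).
 */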
static DEFINE_SPINLOCK(boot_lock);

static void exynos_secondary_init(unsigned int cpu)
{
	/*
	 * let the primary processor know we're out of the
	 * pen, then head off into the C entry point
	 */
	write_pen_release(-1);

	/*
	 * Synchronise with the boot thread.
	 */
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}

static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	int ret = -ENOSYS;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 *
	 * Note that "pen_release" is the hardware CPU core ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
	write_pen_release(core_id);

	if (!exynos_cpu_power_state(core_id)) {
		exynos_cpu_power_up(core_id);
		timeout = 10;

		/* wait max 10 ms until cpu1 is on */
		while (exynos_cpu_power_state(core_id)
		       != S5P_CORE_LOCAL_PWR_EN) {
			if (timeout == 0) {
				pr_err("cpu1 power enable failed\n");
				spin_unlock(&boot_lock);
				return -ETIMEDOUT;
			}
			timeout--;

			mdelay(1);
		}
	}

	exynos_core_restart(core_id);

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		unsigned long boot_addr;

		smp_rmb();

		boot_addr = virt_to_phys(exynos4_secondary_startup);

		/*
		 * Try to set boot address using firmware first
		 * and fall back to boot register if it fails.
		 */
		ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
		if (ret && ret != -ENOSYS)
			goto fail;
		if (ret == -ENOSYS) {
			void __iomem *boot_reg = cpu_boot_reg(core_id);

			if (IS_ERR(boot_reg)) {
				ret = PTR_ERR(boot_reg);
				goto fail;
			}
			__raw_writel(boot_addr, boot_reg);
		}

		call_firmware_op(cpu_boot, core_id);

		if (soc_is_exynos3250())
			dsb_sev();
		else
			arch_send_wakeup_ipi_mask(cpumask_of(cpu));

		if (pen_release == -1)
			break;

		udelay(10);
	}

	/* No harm if this is called during first boot of secondary CPU */
	exynos_set_delayed_reset_assertion(core_id, false);

	/*
	 * now the secondary core is starting up, let it run its
	 * calibrations, then wait for it to finish
	 */
fail:
	spin_unlock(&boot_lock);

	return pen_release != -1 ? ret : 0;
}

/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */

static void __init exynos_smp_init_cpus(void)
{
	void __iomem *scu_base = scu_base_addr();
	unsigned int i, ncores;

	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
		ncores = scu_base ? scu_get_core_count(scu_base) : 1;
	else
		/*
		 * CPU nodes come from the DT and the possible map is
		 * set up by arm_dt_init_cpu_maps().
		 */
		return;

	/* sanity check */
	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);
}

static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	exynos_sysram_init();

	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
		scu_enable(scu_base_addr());

	/*
	 * Write the address of secondary startup into the
	 * system-wide flags register. The boot monitor waits
	 * until it receives a soft interrupt, and then the
	 * secondary CPU branches to this address.
	 *
	 * Try using firmware operation first and fall back to
	 * boot register if it fails.
	 */
	for (i = 1; i < max_cpus; ++i) {
		unsigned long boot_addr;
		u32 mpidr;
		u32 core_id;
		int ret;

		mpidr = cpu_logical_map(i);
		core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		boot_addr = virt_to_phys(exynos4_secondary_startup);

		ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
		if (ret && ret != -ENOSYS)
			break;
		if (ret == -ENOSYS) {
			void __iomem *boot_reg = cpu_boot_reg(core_id);

			if (IS_ERR(boot_reg))
				break;
			__raw_writel(boot_addr, boot_reg);
		}
	}
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * platform-specific code to shutdown a CPU
 *
 * Called with IRQs disabled
 */
static void exynos_cpu_die(unsigned int cpu)
{
	int spurious = 0;
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);

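	/*
	 * Flush and disable this CPU's data cache up to the Level of
	 * Unification Inner Shareable and drop out of SMP coherency
	 * before powering down.
	 */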
	v7_exit_coherency_flush(louis);

	platform_do_lowpower(cpu, &spurious);

	/*
	 * bring this CPU back into the world of cache
	 * coherency, and then restore interrupts
	 */
	cpu_leave_lowpower(core_id);

	if (spurious)
		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
}
#endif /* CONFIG_HOTPLUG_CPU */

struct smp_operations exynos_smp_ops __initdata = {
	.smp_init_cpus		= exynos_smp_init_cpus,
	.smp_prepare_cpus	= exynos_smp_prepare_cpus,
	.smp_secondary_init	= exynos_secondary_init,
	.smp_boot_secondary	= exynos_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= exynos_cpu_die,
#endif
};