/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>

#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>
#include <asm/mpu.h>

/*
 * As from 2.5, kernels no longer have an init_tasks structure, so
 * we need some other way of telling a new secondary core where to
 * place its SVC stack.
 */
struct secondary_data secondary_data;

/*
 * control for which core is the next to come out of the secondary
 * boot "holding pen"
 */
volatile int pen_release = -1;

enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};

static DECLARE_COMPLETION(cpu_running);

static struct smp_operations smp_ops;

void __init smp_set_ops(struct smp_operations *ops)
{
	if (ops)
		smp_ops = *ops;
}

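/*
 * Illustrative sketch (not part of this file; all names below are
 * hypothetical): a platform normally hands its SMP operations to the
 * core code either by calling smp_set_ops() directly or via the .smp
 * field of its machine descriptor, e.g.
 *
 *	static struct smp_operations my_soc_smp_ops __initdata = {
 *		.smp_init_cpus		= my_soc_smp_init_cpus,
 *		.smp_prepare_cpus	= my_soc_smp_prepare_cpus,
 *		.smp_boot_secondary	= my_soc_boot_secondary,
 *	};
 *
 *	DT_MACHINE_START(MY_SOC, "My SoC (Device Tree)")
 *		.smp	= smp_ops(my_soc_smp_ops),
 *	MACHINE_END
 */
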
static unsigned long get_arch_pgd(pgd_t *pgd)
{
	phys_addr_t pgdir = virt_to_phys(pgd);
	BUG_ON(pgdir & ARCH_PGD_MASK);
	return pgdir >> ARCH_PGD_SHIFT;
}

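/*
 * Note (added for clarity): with LPAE, a page table can live above the
 * 4GB boundary, so its physical address may not fit in an unsigned
 * long.  Shifting right by ARCH_PGD_SHIFT lets the packed value fit in
 * a 32-bit register; the secondary boot path shifts it back before
 * programming the translation table base.
 */
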
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
#ifdef CONFIG_ARM_MPU
	secondary_data.mpu_rgn_szr = mpu_rgn_info.rgns[MPU_RAM_REGION].drsr;
#endif

#ifdef CONFIG_MMU
	secondary_data.pgdir = get_arch_pgd(idmap_pgd);
	secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
#endif
	__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	memset(&secondary_data, 0, sizeof(secondary_data));
	return ret;
}

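/*
 * Note (added for clarity): __cpu_up() is invoked from the generic
 * hotplug core - e.g. a cpu_up(1) call during boot, or echoing 1 to
 * /sys/devices/system/cpu/cpu1/online - which passes in the idle
 * thread it has already created for the target CPU.
 */
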
/* platform specific SMP operations */
void __init smp_init_cpus(void)
{
	if (smp_ops.smp_init_cpus)
		smp_ops.smp_init_cpus();
}

int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	if (smp_ops.smp_boot_secondary)
		return smp_ops.smp_boot_secondary(cpu, idle);
	return -ENOSYS;
}

int platform_can_cpu_hotplug(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (smp_ops.cpu_kill)
		return 1;
#endif

	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static void percpu_timer_stop(void);

static int platform_cpu_kill(unsigned int cpu)
{
	if (smp_ops.cpu_kill)
		return smp_ops.cpu_kill(cpu);
	return 1;
}

static int platform_cpu_disable(unsigned int cpu)
{
	if (smp_ops.cpu_disable)
		return smp_ops.cpu_disable(cpu);

	/*
	 * By default, allow disabling all CPUs except the first one,
	 * since this is special on a lot of platforms, e.g. because
	 * of clock tick interrupts.
	 */
	return cpu == 0 ? -EPERM : 0;
}

/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	percpu_timer_stop();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 *
	 * Caches are flushed to the Level of Unification Inner Shareable
	 * to write-back dirty lines to unified caches shared by all CPUs.
	 */
	flush_cache_louis();
	local_flush_tlb_all();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}

static DECLARE_COMPLETION(cpu_died);

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
		pr_err("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_notice("CPU%u: shutdown\n", cpu);

	/*
	 * platform_cpu_kill() is generally expected to do the powering off
	 * and/or cutting of clocks to the dying CPU.  Optionally, this may
	 * be done by the CPU which is dying in preference to supporting
	 * this call, but that means there is _no_ synchronisation between
	 * the requesting CPU and the dying CPU actually losing power.
	 */
	if (!platform_cpu_kill(cpu))
		pr_err("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller.  This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();

	/*
	 * Flush the data out of the L1 cache for this CPU.  This must be
	 * before the completion to ensure that data is safely written out
	 * before platform_cpu_kill() gets called - which may disable
	 * *this* CPU and power down its cache.
	 */
	flush_cache_louis();

	/*
	 * Tell __cpu_die() that this CPU is now safe to dispose of.  Once
	 * this returns, power and/or clocks can be removed at any point
	 * from this CPU and its cache by platform_cpu_kill().
	 */
	complete(&cpu_died);

	/*
	 * Ensure that the cache lines associated with that completion are
	 * written out.  This covers the case where _this_ CPU is doing the
	 * powering down, to ensure that the completion is visible to the
	 * CPU waiting for this one.
	 */
	flush_cache_louis();

	/*
	 * The actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.  This may remove power, or it may simply spin.
	 *
	 * Platforms are generally expected *NOT* to return from this call,
	 * although there are some which do because they have no way to
	 * power down the CPU.  These platforms are the _only_ reason we
	 * have a return path which uses the fragment of assembly below.
	 *
	 * The return path should not be used for platforms which can
	 * power off the CPU.
	 */
	if (smp_ops.cpu_die)
		smp_ops.cpu_die(cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
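	/*
	 * Note (added for clarity): THREAD_SIZE - 8 recreates the
	 * THREAD_START_SP stack pointer this CPU was originally given
	 * in __cpu_up(), so secondary_start_kernel() is re-entered on
	 * a pristine SVC stack.
	 */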
	__asm__("mov	sp, %0\n"
	"	mov	fp, #0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
	cpu_info->cpuid = read_cpuid_id();

	store_cpu_topology(cpuid);
}

static void percpu_timer_setup(void);

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu;

	/*
	 * The identity mapping is uncached (strongly ordered), so
	 * switch away from it before attempting any exclusive accesses.
	 */
	cpu_switch_mm(mm->pgd, mm);
	local_flush_bp_all();
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	cpu = smp_processor_id();
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	cpu_init();

	pr_info("CPU%u: Booted secondary processor\n", cpu);

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	if (smp_ops.smp_secondary_init)
		smp_ops.smp_secondary_init(cpu);

	notify_cpu_starting(cpu);

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue - which happens after __cpu_up returns.
	 */
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	/*
	 * Setup the percpu timer for this CPU.
	 */
	percpu_timer_setup();

	local_irq_enable();
	local_fiq_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_ONLINE);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

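	/*
	 * Note (added for clarity): BogoMIPS = loops_per_jiffy * HZ /
	 * 500000, so dividing the sum by (500000/HZ) yields the integer
	 * part and (bogosum / (5000/HZ)) % 100 the two fractional
	 * digits.  E.g. a bogosum of 4980736 with HZ=100 prints as
	 * "996.14 BogoMIPS".
	 */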
	pr_info("SMP: Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		num_online_cpus(),
		bogosum / (500000/HZ),
		(bogosum / (5000/HZ)) % 100);

	hyp_mode_check();
}

void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;
	if (ncores > 1 && max_cpus) {
		/*
		 * Enable the local timer or broadcast device for the
		 * boot CPU, but only if we have more than one CPU.
		 */
		percpu_timer_setup();

		/*
		 * Initialise the present map, which describes the set of
		 * CPUs actually populated at the present time.  A platform
		 * should re-initialize the map in its smp_prepare_cpus()
		 * implementation if present != possible (e.g. physical
		 * hotplug).
		 */
		init_cpu_present(cpu_possible_mask);

		/*
		 * Initialise the SCU if there is more than one CPU
		 * and let them know where to start.
		 */
		if (smp_ops.smp_prepare_cpus)
			smp_ops.smp_prepare_cpus(max_cpus);
	}
}

static void (*smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	if (!smp_cross_call)
		smp_cross_call = fn;
}

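/*
 * Note (added for clarity): the interrupt controller driver is what
 * normally registers the SGI trigger here - e.g. the GIC driver of
 * this era calls set_smp_cross_call(gic_raise_softirq) during init,
 * after which the IPIs below are delivered as GIC SGIs.
 */
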
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

static const char *ipi_types[NR_IPI] = {
#define S(x,s)	[x] = s
	S(IPI_WAKEUP, "CPU wakeup interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
};

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));

		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

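/*
 * Illustrative sketch (not real output): the table printed above is
 * what shows up in /proc/interrupts, along the lines of:
 *
 *	IPI0:          0          0  CPU wakeup interrupts
 *	IPI1:      10245       9873  Timer broadcast interrupts
 *	IPI2:       5120       4886  Rescheduling interrupts
 *	...
 */
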
u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

/*
 * Timer (local or broadcast) support
 */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif

static void broadcast_timer_set_mode(enum clock_event_mode mode,
	struct clock_event_device *evt)
{
}

static void broadcast_timer_setup(struct clock_event_device *evt)
{
	evt->name	= "dummy_timer";
	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
			  CLOCK_EVT_FEAT_PERIODIC |
			  CLOCK_EVT_FEAT_DUMMY;
	evt->rating	= 100;
	evt->mult	= 1;
	evt->set_mode	= broadcast_timer_set_mode;

	clockevents_register_device(evt);
}

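/*
 * Note (added for clarity): the dummy device above is deliberately
 * given a low rating (100) and the DUMMY feature flag, so the
 * clockevents core treats it as a placeholder whose ticks arrive via
 * the broadcast mechanism (IPI_TIMER) rather than from real per-CPU
 * hardware.
 */
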
static struct local_timer_ops *lt_ops;

#ifdef CONFIG_LOCAL_TIMERS
int local_timer_register(struct local_timer_ops *ops)
{
	if (!is_smp() || !setup_max_cpus)
		return -ENXIO;

	if (lt_ops)
		return -EBUSY;

	lt_ops = ops;
	return 0;
}
#endif

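/*
 * Illustrative sketch (hypothetical names): a per-CPU timer driver,
 * such as the one for the Cortex-A9 TWD, registers itself this way:
 *
 *	static struct local_timer_ops my_lt_ops = {
 *		.setup	= my_timer_setup,
 *		.stop	= my_timer_stop,
 *	};
 *	...
 *	err = local_timer_register(&my_lt_ops);
 *
 * Only one provider can win; a second registration gets -EBUSY.
 */
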
static void percpu_timer_setup(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	evt->cpumask = cpumask_of(cpu);

	if (!lt_ops || lt_ops->setup(evt))
		broadcast_timer_setup(evt);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The generic clock events code purposely does not stop the local timer
 * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
 * manually here.
 */
static void percpu_timer_stop(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	if (lt_ops)
		lt_ops->stop(evt);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		pr_crit("CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
	handle_IPI(ipinr, regs);
}

void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (ipinr < NR_IPI)
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);

	switch (ipinr) {
	case IPI_WAKEUP:
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CALL_FUNC_SINGLE:
		irq_enter();
		generic_smp_call_function_single_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
		break;
	}
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	unsigned long timeout;
	struct cpumask mask;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		smp_cross_call(&mask, IPI_CPU_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warn("SMP: failed to stop secondary CPUs\n");
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

#ifdef CONFIG_CPU_FREQ

static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;

static int cpufreq_callback(struct notifier_block *nb,
					unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	int cpu = freq->cpu;

	if (freq->flags & CPUFREQ_CONST_LOOPS)
		return NOTIFY_OK;

	if (!per_cpu(l_p_j_ref, cpu)) {
		per_cpu(l_p_j_ref, cpu) =
			per_cpu(cpu_data, cpu).loops_per_jiffy;
		per_cpu(l_p_j_ref_freq, cpu) = freq->old;
		if (!global_l_p_j_ref) {
			global_l_p_j_ref = loops_per_jiffy;
			global_l_p_j_ref_freq = freq->old;
		}
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
						global_l_p_j_ref_freq,
						freq->new);
		per_cpu(cpu_data, cpu).loops_per_jiffy =
			cpufreq_scale(per_cpu(l_p_j_ref, cpu),
					per_cpu(l_p_j_ref_freq, cpu),
					freq->new);
	}
	return NOTIFY_OK;
}

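/*
 * Note (added for clarity): cpufreq_scale(ref, ref_freq, new_freq)
 * rescales a loops_per_jiffy reference proportionally to frequency.
 * For example, an l_p_j_ref of 4980736 captured at 500000 kHz becomes
 * roughly 9961472 after a switch to 1000000 kHz, keeping udelay()
 * calibrated across DVFS transitions.
 */
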
static struct notifier_block cpufreq_notifier = {
	.notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&cpufreq_notifier,
		CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif