/*
 * SMP initialisation and IPI support
 * Based on arch/arm/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/irq_work.h>

#include <asm/alternative.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/smp_plat.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/virt.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

/*
 * As of 2.5, kernels no longer have an init_tasks structure, so we need
 * some other way of telling a new secondary core where to place its SVC
 * stack.
 */
struct secondary_data secondary_data;
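
/*
 * Note: secondary_data is read very early by the secondary boot path in
 * arch/arm64/kernel/head.S, which loads secondary_data.stack before
 * branching to secondary_start_kernel(). That is why __cpu_up() below
 * cleans the structure to the PoC before kicking the new CPU.
 */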

enum ipi_msg_type {
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CPU_STOP,
        IPI_TIMER,
        IPI_IRQ_WORK,
};

/*
 * Boot a secondary CPU, and assign it the specified idle task.
 * This also gives us the initial stack to use for this CPU.
 */
static int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
        if (cpu_ops[cpu]->cpu_boot)
                return cpu_ops[cpu]->cpu_boot(cpu);

        return -EOPNOTSUPP;
}
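
/*
 * The actual wake-up mechanism lives behind cpu_ops: on arm64 it is
 * selected per CPU by the "enable-method" property (e.g. "psci" or
 * "spin-table"), so cpu_boot() may issue a PSCI CPU_ON call or release
 * the CPU from its holding pen, depending on the platform.
 */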

static DECLARE_COMPLETION(cpu_running);

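/*
 * Boot handshake: __cpu_up() kicks the secondary CPU and then waits on
 * cpu_running; the secondary completes it from secondary_start_kernel()
 * once it has marked itself online. If the completion does not arrive
 * within one second, the CPU is reported as having failed to come up.
 */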
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
        int ret;

        /*
         * We need to tell the secondary core where to find its stack and the
         * page tables.
         */
        secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
        __flush_dcache_area(&secondary_data, sizeof(secondary_data));

        /*
         * Now bring the CPU into our world.
         */
        ret = boot_secondary(cpu, idle);
        if (ret == 0) {
                /*
                 * CPU was successfully started, wait for it to come online or
                 * time out.
                 */
                wait_for_completion_timeout(&cpu_running,
                                            msecs_to_jiffies(1000));

                if (!cpu_online(cpu)) {
                        pr_crit("CPU%u: failed to come online\n", cpu);
                        ret = -EIO;
                }
        } else {
                pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
        }

        secondary_data.stack = NULL;

        return ret;
}

static void smp_store_cpu_info(unsigned int cpuid)
{
        store_cpu_topology(cpuid);
}

/*
 * This is the secondary CPU boot entry. We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void secondary_start_kernel(void)
{
        struct mm_struct *mm = &init_mm;
        unsigned int cpu = smp_processor_id();

        /*
         * All kernel threads share the same mm context; grab a
         * reference and switch to it.
         */
        atomic_inc(&mm->mm_count);
        current->active_mm = mm;
        cpumask_set_cpu(cpu, mm_cpumask(mm));

        set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
        printk("CPU%u: Booted secondary processor\n", cpu);

        /*
         * TTBR0 is only used for the identity mapping at this stage. Make it
         * point to zero page to avoid speculatively fetching new entries.
         */
        cpu_set_reserved_ttbr0();
        flush_tlb_all();
        cpu_set_default_tcr_t0sz();

        preempt_disable();
        trace_hardirqs_off();

        if (cpu_ops[cpu]->cpu_postboot)
                cpu_ops[cpu]->cpu_postboot();

        /*
         * Log the CPU info before it is marked online and might get read.
         */
        cpuinfo_store_cpu();

        /*
         * Enable GIC and timers.
         */
        notify_cpu_starting(cpu);

        smp_store_cpu_info(cpu);

        /*
         * OK, now it's safe to let the boot CPU continue. Wait for
         * the CPU migration code to notice that the CPU is online
         * before we continue.
         */
        set_cpu_online(cpu, true);
        complete(&cpu_running);

        local_dbg_enable();
        local_irq_enable();
        local_async_enable();

        /*
         * OK, it's off to the idle thread for us
         */
        cpu_startup_entry(CPUHP_ONLINE);
}
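
/*
 * Note the ordering above: the CPU's description (cpuinfo, topology) is
 * recorded before set_cpu_online(), so nothing can observe an online
 * CPU with stale data, and interrupts are enabled only after the boot
 * CPU has been released via complete(&cpu_running).
 */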

#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_disable(unsigned int cpu)
{
        /*
         * If we don't have a cpu_die method, abort before we reach the point
         * of no return. CPU0 may not have a cpu_ops, so test for it.
         */
        if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
                return -EOPNOTSUPP;

        /*
         * We may need to abort a hot unplug for some other mechanism-specific
         * reason.
         */
        if (cpu_ops[cpu]->cpu_disable)
                return cpu_ops[cpu]->cpu_disable(cpu);

        return 0;
}

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        int ret;

        ret = op_cpu_disable(cpu);
        if (ret)
                return ret;

        /*
         * Take this CPU offline. Once we clear this, we can't return,
         * and we must not schedule until we're ready to give up the cpu.
         */
        set_cpu_online(cpu, false);

        /*
         * OK - migrate IRQs away from this CPU
         */
        migrate_irqs();

        /*
         * Remove this CPU from the vm mask set of all processes.
         */
        clear_tasks_mm_cpumask(cpu);

        return 0;
}

static int op_cpu_kill(unsigned int cpu)
{
        /*
         * If we have no means of synchronising with the dying CPU, then assume
         * that it is really dead. We can only wait for an arbitrary length of
         * time and hope that it's dead, so let's skip the wait and just hope.
         */
        if (!cpu_ops[cpu]->cpu_kill)
                return 0;

        return cpu_ops[cpu]->cpu_kill(cpu);
}

/*
 * Called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or it times out.
 */
void __cpu_die(unsigned int cpu)
{
        int err;

        if (!cpu_wait_death(cpu, 5)) {
                pr_crit("CPU%u: cpu didn't die\n", cpu);
                return;
        }
        pr_notice("CPU%u: shutdown\n", cpu);

        /*
         * Now that the dying CPU is beyond the point of no return w.r.t.
         * in-kernel synchronisation, try to get the firmware to help us to
         * verify that it has really left the kernel before we consider
         * clobbering anything it might still be using.
         */
        err = op_cpu_kill(cpu);
        if (err)
                pr_warn("CPU%d may not have shut down cleanly: %d\n",
                        cpu, err);
}

/*
 * Called from the idle thread for the CPU which has been shut down.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void cpu_die(void)
{
        unsigned int cpu = smp_processor_id();

        idle_task_exit();

        local_irq_disable();

        /* Tell __cpu_die() that this CPU is now safe to dispose of */
        (void)cpu_report_death();

        /*
         * Actually shutdown the CPU. This must never fail. The specific hotplug
         * mechanism must perform all required cache maintenance to ensure that
         * no dirty lines are lost in the process of shutting down the CPU.
         */
        cpu_ops[cpu]->cpu_die(cpu);

        BUG();
}
#endif
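
/*
 * Hotplug flow, for reference: __cpu_disable() and cpu_die() run on the
 * CPU going down (the latter from its idle thread), while __cpu_die()
 * runs on the CPU requesting the unplug and, when a cpu_kill method is
 * available, asks the firmware to confirm the victim has really left
 * the kernel.
 */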

static void __init hyp_mode_check(void)
{
        if (is_hyp_mode_available())
                pr_info("CPU: All CPU(s) started at EL2\n");
        else if (is_hyp_mode_mismatched())
                WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
                           "CPU: CPUs started in inconsistent modes");
        else
                pr_info("CPU: All CPU(s) started at EL1\n");
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
        hyp_mode_check();
        apply_alternatives_all();
}

void __init smp_prepare_boot_cpu(void)
{
        set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}
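
/*
 * set_my_cpu_offset() stashes this CPU's per-cpu offset in TPIDR_EL1 so
 * the this_cpu accessors work; the boot CPU sets it up here, secondaries
 * do the same early in secondary_start_kernel().
 */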

static u64 __init of_get_cpu_mpidr(struct device_node *dn)
{
        const __be32 *cell;
        u64 hwid;

        /*
         * A cpu node with missing "reg" property is
         * considered invalid to build a cpu_logical_map
         * entry.
         */
        cell = of_get_property(dn, "reg", NULL);
        if (!cell) {
                pr_err("%s: missing reg property\n", dn->full_name);
                return INVALID_HWID;
        }

        hwid = of_read_number(cell, of_n_addr_cells(dn));
        /*
         * Non affinity bits must be set to 0 in the DT
         */
        if (hwid & ~MPIDR_HWID_BITMASK) {
                pr_err("%s: invalid reg property\n", dn->full_name);
                return INVALID_HWID;
        }
        return hwid;
}
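
/*
 * For illustration, a hypothetical DT cpu node that would parse cleanly
 * here, with the MPIDR affinity value carried in "reg":
 *
 *        cpu@0 {
 *                device_type = "cpu";
 *                compatible = "arm,cortex-a57";
 *                reg = <0x0 0x0>;
 *                enable-method = "psci";
 *        };
 */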

/*
 * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
 * entries and check for duplicates. If any is found just ignore the
 * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid
 * matching valid MPIDR values.
 */
static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
{
        unsigned int i;

        for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
                if (cpu_logical_map(i) == hwid)
                        return true;
        return false;
}

/*
 * Initialize cpu operations for a logical cpu and
 * set it in the possible mask on success
 */
static int __init smp_cpu_setup(int cpu)
{
        if (cpu_read_ops(cpu))
                return -ENODEV;

        if (cpu_ops[cpu]->cpu_init(cpu))
                return -ENODEV;

        set_cpu_possible(cpu, true);

        return 0;
}
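
/*
 * Logical cpu 0 is always the boot CPU, whose MPIDR was recorded in
 * cpu_logical_map(0) during early setup; the enumeration below therefore
 * starts cpu_count at 1.
 */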

static bool bootcpu_valid __initdata;
static unsigned int cpu_count = 1;

#ifdef CONFIG_ACPI
/*
 * acpi_map_gic_cpu_interface - parse processor MADT entry
 *
 * Carry out sanity checks on MADT processor entry and initialize
 * cpu_logical_map on success
 */
static void __init
acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
{
        u64 hwid = processor->arm_mpidr;

        if (!(processor->flags & ACPI_MADT_ENABLED)) {
                pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
                return;
        }

        if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
                pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
                return;
        }

        if (is_mpidr_duplicate(cpu_count, hwid)) {
                pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid);
                return;
        }

        /* Check if GICC structure of boot CPU is available in the MADT */
        if (cpu_logical_map(0) == hwid) {
                if (bootcpu_valid) {
                        pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n",
                               hwid);
                        return;
                }
                bootcpu_valid = true;
                return;
        }

        if (cpu_count >= NR_CPUS)
                return;

        /* map the logical cpu id to cpu MPIDR */
        cpu_logical_map(cpu_count) = hwid;

        cpu_count++;
}

static int __init
acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
                             const unsigned long end)
{
        struct acpi_madt_generic_interrupt *processor;

        processor = (struct acpi_madt_generic_interrupt *)header;
        if (BAD_MADT_GICC_ENTRY(processor, end))
                return -EINVAL;

        acpi_table_print_madt_entry(header);

        acpi_map_gic_cpu_interface(processor);

        return 0;
}
#else
#define acpi_table_parse_madt(...)        do { } while (0)
#endif
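
/*
 * With !CONFIG_ACPI the acpi_table_parse_madt() stub above compiles the
 * MADT walk out of smp_init_cpus() entirely; acpi_disabled is constant
 * true in that configuration, so only the DT path can run anyway.
 */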

/*
 * Enumerate the possible CPU set from the device tree and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
void __init of_parse_and_init_cpus(void)
{
        struct device_node *dn = NULL;

        while ((dn = of_find_node_by_type(dn, "cpu"))) {
                u64 hwid = of_get_cpu_mpidr(dn);

                if (hwid == INVALID_HWID)
                        goto next;

                if (is_mpidr_duplicate(cpu_count, hwid)) {
                        pr_err("%s: duplicate cpu reg properties in the DT\n",
                                dn->full_name);
                        goto next;
                }

                /*
                 * The numbering scheme requires that the boot CPU
                 * must be assigned logical id 0. Record it so that
                 * the logical map built from DT is validated and can
                 * be used.
                 */
                if (hwid == cpu_logical_map(0)) {
                        if (bootcpu_valid) {
                                pr_err("%s: duplicate boot cpu reg property in DT\n",
                                        dn->full_name);
                                goto next;
                        }

                        bootcpu_valid = true;

                        /*
                         * cpu_logical_map has already been
                         * initialized and the boot cpu doesn't need
                         * the enable-method so continue without
                         * incrementing cpu.
                         */
                        continue;
                }

                if (cpu_count >= NR_CPUS)
                        goto next;

                pr_debug("cpu logical map 0x%llx\n", hwid);
                cpu_logical_map(cpu_count) = hwid;
next:
                cpu_count++;
        }
}

/*
 * Enumerate the possible CPU set from the device tree or ACPI and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
void __init smp_init_cpus(void)
{
        int i;

        if (acpi_disabled)
                of_parse_and_init_cpus();
        else
                /*
                 * do a walk of MADT to determine how many CPUs
                 * we have including disabled CPUs, and get information
                 * we need for SMP init
                 */
                acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
                                      acpi_parse_gic_cpu_interface, 0);

        if (cpu_count > NR_CPUS)
                pr_warn("no. of cores (%d) greater than configured maximum of %d - clipping\n",
                        cpu_count, NR_CPUS);

        if (!bootcpu_valid) {
                pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
                return;
        }

        /*
         * We need to set the cpu_logical_map entries before enabling
         * the cpus so that cpu processor description entries (DT cpu nodes
         * and ACPI MADT entries) can be retrieved by matching the cpu hwid
         * with entries in cpu_logical_map while initializing the cpus.
         * If the cpu set-up fails, invalidate the cpu_logical_map entry.
         */
        for (i = 1; i < NR_CPUS; i++) {
                if (cpu_logical_map(i) != INVALID_HWID) {
                        if (smp_cpu_setup(i))
                                cpu_logical_map(i) = INVALID_HWID;
                }
        }
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        int err;
        unsigned int cpu, ncores = num_possible_cpus();

        init_cpu_topology();

        smp_store_cpu_info(smp_processor_id());

        /*
         * are we trying to boot more cores than exist?
         */
        if (max_cpus > ncores)
                max_cpus = ncores;

        /* Don't bother if we're effectively UP */
        if (max_cpus <= 1)
                return;

        /*
         * Initialise the present map (which describes the set of CPUs
         * actually populated at the present time) and release the
         * secondaries from the bootloader.
         *
         * Make sure we online at most (max_cpus - 1) additional CPUs.
         */
        max_cpus--;
        for_each_possible_cpu(cpu) {
                if (max_cpus == 0)
                        break;

                if (cpu == smp_processor_id())
                        continue;

                if (!cpu_ops[cpu])
                        continue;

                err = cpu_ops[cpu]->cpu_prepare(cpu);
                if (err)
                        continue;

                set_cpu_present(cpu, true);
                max_cpus--;
        }
}

void (*__smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
        __smp_cross_call = fn;
}
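
/*
 * The interrupt controller driver registers its IPI-raising routine
 * here during init; on most arm64 systems this is the GIC driver's
 * SGI (software generated interrupt) raise function.
 */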

static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)        [x] = s
        S(IPI_RESCHEDULE, "Rescheduling interrupts"),
        S(IPI_CALL_FUNC, "Function call interrupts"),
        S(IPI_CPU_STOP, "CPU stop interrupts"),
        S(IPI_TIMER, "Timer broadcast interrupts"),
        S(IPI_IRQ_WORK, "IRQ work interrupts"),
};

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
        trace_ipi_raise(target, ipi_types[ipinr]);
        __smp_cross_call(target, ipinr);
}

void show_ipi_list(struct seq_file *p, int prec)
{
        unsigned int cpu, i;

        for (i = 0; i < NR_IPI; i++) {
                seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
                           prec >= 4 ? " " : "");
                for_each_online_cpu(cpu)
                        seq_printf(p, "%10u ",
                                   __get_irq_stat(cpu, ipi_irqs[i]));
                seq_printf(p, "      %s\n", ipi_types[i]);
        }
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = 0;
        int i;

        for (i = 0; i < NR_IPI; i++)
                sum += __get_irq_stat(cpu, ipi_irqs[i]);

        return sum;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
        if (__smp_cross_call)
                smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif
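
/*
 * These hooks are what the generic smp_call_function*() machinery calls
 * into: kernel/smp.c queues the call data on the target's list before
 * the IPI is raised, so the handler only has to run
 * generic_smp_call_function_interrupt().
 */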

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
        if (system_state == SYSTEM_BOOTING ||
            system_state == SYSTEM_RUNNING) {
                raw_spin_lock(&stop_lock);
                pr_crit("CPU%u: stopping\n", cpu);
                dump_stack();
                raw_spin_unlock(&stop_lock);
        }

        set_cpu_online(cpu, false);

        local_irq_disable();

        while (1)
                cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
        unsigned int cpu = smp_processor_id();
        struct pt_regs *old_regs = set_irq_regs(regs);

        if ((unsigned)ipinr < NR_IPI) {
                trace_ipi_entry_rcuidle(ipi_types[ipinr]);
                __inc_irq_stat(cpu, ipi_irqs[ipinr]);
        }

        switch (ipinr) {
        case IPI_RESCHEDULE:
                scheduler_ipi();
                break;

        case IPI_CALL_FUNC:
                irq_enter();
                generic_smp_call_function_interrupt();
                irq_exit();
                break;

        case IPI_CPU_STOP:
                irq_enter();
                ipi_cpu_stop(cpu);
                irq_exit();
                break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
        case IPI_TIMER:
                irq_enter();
                tick_receive_broadcast();
                irq_exit();
                break;
#endif

#ifdef CONFIG_IRQ_WORK
        case IPI_IRQ_WORK:
                irq_enter();
                irq_work_run();
                irq_exit();
                break;
#endif

        default:
                pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
                break;
        }

        if ((unsigned)ipinr < NR_IPI)
                trace_ipi_exit_rcuidle(ipi_types[ipinr]);
        set_irq_regs(old_regs);
}
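
/*
 * Note that IPI_RESCHEDULE deliberately skips irq_enter()/irq_exit()
 * above: scheduler_ipi() is written to be safe from this bare context,
 * and keeping the reschedule path cheap is the point of that IPI.
 */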

void smp_send_reschedule(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_TIMER);
}
#endif

void smp_send_stop(void)
{
        unsigned long timeout;

        if (num_online_cpus() > 1) {
                cpumask_t mask;

                cpumask_copy(&mask, cpu_online_mask);
                cpumask_clear_cpu(smp_processor_id(), &mask);

                smp_cross_call(&mask, IPI_CPU_STOP);
        }

        /* Wait up to one second for other CPUs to stop */
        timeout = USEC_PER_SEC;
        while (num_online_cpus() > 1 && timeout--)
                udelay(1);

        if (num_online_cpus() > 1)
                pr_warning("SMP: failed to stop secondary CPUs\n");
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}