powerpc: kernel: remove useless code related to 'max_cpus'
arch/powerpc/kernel/smp.c
/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/vdso.h>
#include <asm/debug.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif

struct thread_info *secondary_ti;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;

/*
 * Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or
 * limited on the command line.
 */
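/*
 * Example (assuming an SMT4 core): booting with smt-enabled=2 leaves
 * smt_enabled_at_boot == 2, so threads 0-1 of each core are bootable
 * and threads 2-3 are held back; smt-enabled=off (smt_enabled_at_boot
 * == 0) makes only thread 0 of each core bootable.
 */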
int smp_generic_cpu_bootable(unsigned int nr)
{
	/* Special case - we inhibit secondary thread startup
	 * during boot if the user requests it.
	 */
	if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) {
		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
			return 0;
		if (smt_enabled_at_boot
		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
			return 0;
	}

	return 1;
}

#ifdef CONFIG_PPC64
int smp_generic_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero. After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	if (!paca[nr].cpu_start) {
		paca[nr].cpu_start = 1;
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Ok it's not there, so it might be soft-unplugged, let's
	 * try to bring it back.
	 */
	generic_set_cpu_up(nr);
	smp_wmb();
	smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}
#endif /* CONFIG_PPC64 */

static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

static irqreturn_t call_function_single_action(int irq, void *data)
{
	generic_smp_call_function_single_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t debug_ipi_action(int irq, void *data)
{
	if (crash_ipi_function_ptr) {
		crash_ipi_function_ptr(get_irq_regs());
		return IRQ_HANDLED;
	}

#ifdef CONFIG_DEBUGGER
	debugger_ipi(get_irq_regs());
#endif /* CONFIG_DEBUGGER */

	return IRQ_HANDLED;
}

static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] = call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
	[PPC_MSG_CALL_FUNC_SINGLE] = call_function_single_action,
	[PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
};

const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] = "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
	[PPC_MSG_CALL_FUNC_SINGLE] = "ipi call function single",
	[PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
};

/* optional function to request ipi, for controllers with >= 4 ipis */
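/*
 * Returns 0 on success, 1 if the message is compiled out
 * (PPC_MSG_DEBUGGER_BREAK without CONFIG_DEBUGGER or CONFIG_KEXEC),
 * or a negative errno on failure.
 */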
int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK) {
		return -EINVAL;
	}
#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC)
	if (msg == PPC_MSG_DEBUGGER_BREAK) {
		return 1;
	}
#endif
	err = request_irq(virq, smp_ipi_action[msg],
			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
			  smp_ipi_name[msg], NULL);
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
	     virq, smp_ipi_name[msg], err);

	return err;
}

#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
	int messages;			/* current messages */
	unsigned long data;		/* data for cause ipi */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_data(int cpu, unsigned long data)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);

	info->data = data;
}

void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
	char *message = (char *)&info->messages;

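	/*
	 * Each message type owns one byte of info->messages, so
	 * concurrent senders of different messages to this cpu
	 * cannot clobber each other's flag.
	 */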
	/*
	 * Order previous accesses before accesses in the IPI handler.
	 */
	smp_mb();
	message[msg] = 1;
	/*
	 * cause_ipi functions are required to include a full barrier
	 * before doing whatever causes the IPI.
	 */
	smp_ops->cause_ipi(cpu, info->data);
}

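/*
 * Map a message number to the byte it occupies in the 32-bit
 * "messages" word, matching the byte stores above: message 0 lives
 * in byte 0, which is bit 24 on big-endian (byte 0 is the MSB) and
 * bit 0 on little-endian.
 */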
#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1 << (24 - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1 << (8 * (A)))
#endif

irqreturn_t smp_ipi_demux(void)
{
	struct cpu_messages *info = &__get_cpu_var(ipi_message);
	unsigned int all;

	mb();	/* order any irq clear */

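	/*
	 * xchg() atomically claims every pending message byte; loop in
	 * case a sender posts a new message while we are still handling
	 * the batch we just took.
	 */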
	do {
		all = xchg(&info->messages, 0);
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
			generic_smp_call_function_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
			scheduler_ipi();
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNC_SINGLE))
			generic_smp_call_function_single_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_DEBUGGER_BREAK))
			debug_ipi_action(0, NULL);
	} while (info->messages);

	return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */

static inline void do_message_pass(int cpu, int msg)
{
	if (smp_ops->message_pass)
		smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	else
		smp_muxed_ipi_message_pass(cpu, msg);
#endif
}

void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
{
	do_message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
void smp_send_debugger_break(void)
{
	int cpu;
	int me = raw_smp_processor_id();

	if (unlikely(!smp_ops))
		return;

	for_each_online_cpu(cpu)
		if (cpu != me)
			do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

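/*
 * Crash dumps reuse the debugger-break IPI: debug_ipi_action() calls
 * crash_ipi_function_ptr first whenever it is non-NULL.
 */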
#ifdef CONFIG_KEXEC
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	crash_ipi_function_ptr = crash_ipi_callback;
	if (crash_ipi_callback) {
		mb();
		smp_send_debugger_break();
	}
}
#endif

static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

struct thread_info *current_set[NR_CPUS];

static void smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
	per_cpu(next_tlbcam_idx, id)
		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
	}

	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (smp_ops && smp_ops->probe)
		smp_ops->probe();
}

void smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca[boot_cpuid].__current = current;
#endif
	current_set[boot_cpuid] = task_thread_info(current);
}

#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	migrate_irqs();
	return 0;
}

void generic_cpu_die(unsigned int cpu)
{
	int i;

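	/* Poll for up to ten seconds (100 iterations x 100ms). */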
	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

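/*
 * Final resting place of an offlined CPU: mark ourselves CPU_DEAD,
 * then spin until kick_cpu() flips cpu_state back to CPU_UP_PREPARE
 * to bring this thread online again.
 */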
void generic_mach_cpu_die(void)
{
	unsigned int cpu;

	local_irq_disable();
	idle_task_exit();
	cpu = smp_processor_id();
	printk(KERN_DEBUG "CPU%d offline\n", cpu);
	__get_cpu_var(cpu_state) = CPU_DEAD;
	smp_wmb();
	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
		cpu_relax();
}

void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}

/*
 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
 * which makes the delay in generic_cpu_die() not happen.
 */
void generic_set_cpu_up(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}

int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}

static atomic_t secondary_inhibit_count;

/*
 * Don't allow secondary CPU threads to come online
 */
void inhibit_secondary_onlining(void)
{
	/*
	 * This makes secondary_inhibit_count stable during cpu
	 * online/offline operations.
	 */
	get_online_cpus();

	atomic_inc(&secondary_inhibit_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(inhibit_secondary_onlining);

/*
 * Allow secondary CPU threads to come online again
 */
void uninhibit_secondary_onlining(void)
{
	get_online_cpus();
	atomic_dec(&secondary_inhibit_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(uninhibit_secondary_onlining);

static int secondaries_inhibited(void)
{
	return atomic_read(&secondary_inhibit_count);
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()	0

#endif

static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
	struct thread_info *ti = task_thread_info(idle);

#ifdef CONFIG_PPC64
	paca[cpu].__current = idle;
	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	ti->cpu = cpu;
	secondary_ti = current_set[cpu] = ti;
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc, c;

	/*
	 * Don't allow secondary threads to come online if inhibited
	 */
	if (threads_per_core > 1 && secondaries_inhibited() &&
	    cpu % threads_per_core != 0)
		return -EBUSY;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	cpu_idle_thread_init(cpu, tidle);

	/* Make sure callin-map entry is 0 (can be a leftover from a
	 * CPU hotplug)
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}

	/*
	 * wait to see if the cpu made a callin (is actually up).
	 * use this value that I found through experimentation.
	 * -- Cort
	 */
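	/* At boot this busy-waits 50000 x 100us, i.e. about 5 seconds. */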
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case.  Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online map */
	while (!cpu_online(cpu))
		cpu_relax();

	return 0;
}

/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	const int *reg;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	reg = of_get_property(np, "reg", NULL);
	if (!reg)
		goto out;

	id = *reg;
out:
	of_node_put(np);
	return id;
}

/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);

static void traverse_siblings_chip_id(int cpu, bool add, int chipid)
{
	const struct cpumask *mask;
	struct device_node *np;
	int i, plen;
	const __be32 *prop;

	mask = add ? cpu_online_mask : cpu_present_mask;
	for_each_cpu(i, mask) {
		np = of_get_cpu_node(i, NULL);
		if (!np)
			continue;
		prop = of_get_property(np, "ibm,chip-id", &plen);
		if (prop && plen == sizeof(int) &&
		    of_read_number(prop, 1) == chipid) {
			if (add) {
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
			} else {
				cpumask_clear_cpu(cpu, cpu_core_mask(i));
				cpumask_clear_cpu(i, cpu_core_mask(cpu));
			}
		}
		of_node_put(np);
	}
}

/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}

static void traverse_core_siblings(int cpu, bool add)
{
	struct device_node *l2_cache, *np;
	const struct cpumask *mask;
	int i, chip, plen;
	const __be32 *prop;

	/* First see if we have ibm,chip-id properties in cpu nodes */
	np = of_get_cpu_node(cpu, NULL);
	if (np) {
		chip = -1;
		prop = of_get_property(np, "ibm,chip-id", &plen);
		if (prop && plen == sizeof(int))
			chip = of_read_number(prop, 1);
		of_node_put(np);
		if (chip >= 0) {
			traverse_siblings_chip_id(cpu, add, chip);
			return;
		}
	}

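	/* No ibm,chip-id: fall back to grouping cpus that share an L2 cache. */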
	l2_cache = cpu_to_l2cache(cpu);
	mask = add ? cpu_online_mask : cpu_present_mask;
	for_each_cpu(i, mask) {
		np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			if (add) {
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
			} else {
				cpumask_clear_cpu(cpu, cpu_core_mask(i));
				cpumask_clear_cpu(i, cpu_core_mask(cpu));
			}
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);
}

/* Activate a secondary processor. */
void start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	int i, base;

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;

	vdso_getcpu_init();
#endif
	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		if (cpu_is_offline(base + i) && (cpu != base + i))
			continue;
		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

		/* cpu_core_map should be a superset of
		 * cpu_sibling_map even if we don't have cache
		 * information, so update the former here, too.
		 */
		cpumask_set_cpu(cpu, cpu_core_mask(base + i));
		cpumask_set_cpu(base + i, cpu_core_mask(cpu));
	}
	traverse_core_siblings(cpu, true);

	smp_wmb();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();

	cpu_startup_entry(CPUHP_ONLINE);

	BUG();
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	cpumask_var_t old_mask;

	/* We want the setup_cpu() here to be called from CPU 0, but our
	 * init thread may have been "borrowed" by another CPU in the
	 * meantime, so we pin ourselves to CPU 0 for a short while.
	 */
	alloc_cpumask_var(&old_mask, GFP_NOWAIT);
	cpumask_copy(old_mask, tsk_cpus_allowed(current));
	set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));

	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	set_cpus_allowed_ptr(current, old_mask);

	free_cpumask_var(old_mask);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();
}

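/*
 * On CPUs with CPU_FTR_ASYM_SMT (e.g. POWER7), lower-numbered SMT
 * threads run faster when their siblings are idle, so ask the
 * scheduler to pack runnable tasks onto the lowest-numbered threads.
 */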
int arch_sd_sibling_asym_packing(void)
{
	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		return SD_ASYM_PACKING;
	}
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	int base, i;
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
		cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
	}
	traverse_core_siblings(cpu, false);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}

void cpu_die(void)
{
	if (ppc_md.cpu_die)
		ppc_md.cpu_die();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}

#endif