/*
 *  arch/s390/kernel/smp.c
 *
 *    Copyright (C) IBM Corp. 1999,2006
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *               Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (got from STAP) are the sigp
 * functions. For all other functions we use the identity mapping.
 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 * used e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the physical
 * one which is causing all the confusion with __cpu_logical_map and
 * cpu_number_map in other architectures.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>
#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/timex.h>
#include <asm/setup.h>
#include <asm/pgalloc.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>

extern volatile int __cpu_logical_map[];
/*
 * An array with a pointer to the lowcore of every CPU.
 */
struct _lowcore *lowcore_ptr[NR_CPUS];

cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t cpu_possible_map = CPU_MASK_NONE;

static struct task_struct *current_set[NR_CPUS];

static void smp_ext_bitcall(int, ec_bit_sig);
/*
 * Structure and data for __smp_call_function_map(). This is designed to
 * minimise static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
        void (*func) (void *info);
        void *info;
        cpumask_t started;
        cpumask_t finished;
        int wait;
};

static struct call_data_struct *call_data;
/*
 * 'Call function' interrupt callback
 */
static void do_call_function(void)
{
        void (*func) (void *info) = call_data->func;
        void *info = call_data->info;
        int wait = call_data->wait;

        cpu_set(smp_processor_id(), call_data->started);
        (*func)(info);
        if (wait)
                cpu_set(smp_processor_id(), call_data->finished);
}
static void __smp_call_function_map(void (*func) (void *info), void *info,
                                    int nonatomic, int wait, cpumask_t map)
{
        struct call_data_struct data;
        int cpu, local = 0;

        /*
         * Can deadlock when interrupts are disabled or if in wrong context,
         * caller must disable preemption
         */
        WARN_ON(irqs_disabled() || in_irq() || preemptible());

        /*
         * Check for local function call. We have to have the same call order
         * as in on_each_cpu() because of machine_restart_smp().
         */
        if (cpu_isset(smp_processor_id(), map)) {
                local = 1;
                cpu_clear(smp_processor_id(), map);
        }

        cpus_and(map, map, cpu_online_map);
        if (cpus_empty(map))
                goto out;

        data.func = func;
        data.info = info;
        data.started = CPU_MASK_NONE;
        data.wait = wait;
        if (wait)
                data.finished = CPU_MASK_NONE;

        spin_lock_bh(&call_lock);
        call_data = &data;

        for_each_cpu_mask(cpu, map)
                smp_ext_bitcall(cpu, ec_call_function);

        /* Wait for response */
        while (!cpus_equal(map, data.started))
                cpu_relax();

        if (wait)
                while (!cpus_equal(map, data.finished))
                        cpu_relax();

        spin_unlock_bh(&call_lock);

out:
        if (local) {
                local_irq_disable();
                func(info);
                local_irq_enable();
        }
}
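
/*
 * Illustrative note on the handshake above: each target cpu first sets
 * its bit in data.started, runs func, and then (with wait) sets its bit
 * in data.finished (see do_call_function). The caller spins until the
 * started mask -- and, when waiting, the finished mask -- equals the map
 * of signalled cpus, so the on-stack call_data stays valid for as long
 * as any target may still dereference it.
 */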
/*
 * smp_call_function:
 * @func: the function to run; this must be fast and non-blocking
 * @info: an arbitrary pointer to pass to the function
 * @nonatomic: unused
 * @wait: if true, wait (atomically) until function has completed on other CPUs
 *
 * Run a function on all other CPUs.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler. Must be called with preemption disabled.
 * You may call it from a bottom half.
 */
int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
                      int wait)
{
        cpumask_t map;

        map = cpu_online_map;
        cpu_clear(smp_processor_id(), map);
        __smp_call_function_map(func, info, nonatomic, wait, map);
        return 0;
}
EXPORT_SYMBOL(smp_call_function);
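
/*
 * Usage sketch (illustrative only; the helper below is hypothetical).
 * The callback runs on every other online cpu in interrupt context, so
 * it must be fast and non-blocking:
 *
 *      static atomic_t hits = ATOMIC_INIT(0);
 *
 *      static void count_cpu(void *info)
 *      {
 *              atomic_inc(&hits);
 *      }
 *
 *      preempt_disable();
 *      smp_call_function(count_cpu, NULL, 0, 1);
 *      preempt_enable();
 */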
/*
 * smp_call_function_on:
 * @func: the function to run; this must be fast and non-blocking
 * @info: an arbitrary pointer to pass to the function
 * @nonatomic: unused
 * @wait: if true, wait (atomically) until function has completed on other CPUs
 * @cpu: the CPU where func should run
 *
 * Run a function on one processor.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler. Must be called with preemption disabled.
 * You may call it from a bottom half.
 */
int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic,
                         int wait, int cpu)
{
        cpumask_t map = CPU_MASK_NONE;

        cpu_set(cpu, map);
        __smp_call_function_map(func, info, nonatomic, wait, map);
        return 0;
}
EXPORT_SYMBOL(smp_call_function_on);
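
/*
 * Usage sketch (illustrative only, reusing the hypothetical count_cpu
 * helper from above). smp_get_cpu/smp_put_cpu, defined further down,
 * are the usual way to pin the target cpu while the call is in flight:
 *
 *      int target = smp_get_cpu(cpu_online_map);
 *
 *      if (target >= 0) {
 *              preempt_disable();
 *              smp_call_function_on(count_cpu, NULL, 0, 1, target);
 *              preempt_enable();
 *              smp_put_cpu(target);
 *      }
 */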
static void do_send_stop(void)
{
        int cpu, rc;

        /* stop all processors */
        for_each_online_cpu(cpu) {
                if (cpu == smp_processor_id())
                        continue;
                do {
                        rc = signal_processor(cpu, sigp_stop);
                } while (rc == sigp_busy);
        }
}
static void do_store_status(void)
{
        int cpu, rc;

        /* store status of all processors in their lowcores (real 0) */
        for_each_online_cpu(cpu) {
                if (cpu == smp_processor_id())
                        continue;
                do {
                        rc = signal_processor_p(
                                (__u32)(unsigned long) lowcore_ptr[cpu], cpu,
                                sigp_store_status_at_address);
                } while (rc == sigp_busy);
        }
}
static void do_wait_for_stop(void)
{
        int cpu;

        /* Wait for all other cpus to enter stopped state */
        for_each_online_cpu(cpu) {
                if (cpu == smp_processor_id())
                        continue;
                while (!smp_cpu_not_running(cpu))
                        cpu_relax();
        }
}
/*
 * this function sends a 'stop' sigp to all other CPUs in the system.
 * it goes straight through.
 */
void smp_send_stop(void)
{
        /* Disable all interrupts/machine checks */
        __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);

        /* write magic number to zero page (absolute 0) */
        lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;

        /* stop other processors. */
        do_send_stop();

        /* wait until other processors are stopped */
        do_wait_for_stop();

        /* store status of other processors. */
        do_store_status();
}
/*
 * Reboot, halt and power_off routines for SMP.
 */
void machine_restart_smp(char *__unused)
{
        smp_send_stop();
        do_reipl();
}

void machine_halt_smp(void)
{
        smp_send_stop();
        if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
                __cpcmd(vmhalt_cmd, NULL, 0, NULL);
        signal_processor(smp_processor_id(), sigp_stop_and_store_status);
        for (;;);
}

void machine_power_off_smp(void)
{
        smp_send_stop();
        if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
                __cpcmd(vmpoff_cmd, NULL, 0, NULL);
        signal_processor(smp_processor_id(), sigp_stop_and_store_status);
        for (;;);
}
/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void do_ext_call_interrupt(__u16 code)
{
        unsigned long bits;

        /*
         * handle bit signal external calls
         *
         * For the ec_schedule signal we have to do nothing. All the work
         * is done automatically when we return from the interrupt.
         */
        bits = xchg(&S390_lowcore.ext_call_fast, 0);

        if (test_bit(ec_call_function, &bits))
                do_call_function();
}
/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
        /*
         * Set signaling bit in lowcore of target cpu and kick it
         */
        set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
        while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
                udelay(10);
}
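
/*
 * Illustrative note: smp_ext_bitcall and do_ext_call_interrupt form a
 * pair. The sender sets the signal bit in the target's lowcore and
 * raises an emergency-signal external interrupt; the receiver fetches
 * and clears ext_call_fast atomically with xchg and acts on every bit
 * that was set, so several senders can share a single interrupt.
 */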
#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
void smp_ptlb_callback(void *info)
{
        local_flush_tlb();
}

void smp_ptlb_all(void)
{
        on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */
/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
        smp_ext_bitcall(cpu, ec_schedule);
}
/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
        unsigned long orvals[16];
        unsigned long andvals[16];
};
/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
        struct ec_creg_mask_parms *pp = info;
        unsigned long cregs[16];
        int i;

        __ctl_store(cregs, 0, 15);
        for (i = 0; i <= 15; i++)
                cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
        __ctl_load(cregs, 0, 15);
}
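
/*
 * Worked example (illustrative only): with andvals[i] = ~0UL and
 * orvals[i] = 0 for every untouched register, the update
 *
 *      cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
 *
 * is a no-op. To set bit 17 of control register 0 one would pass
 * andvals[0] = ~0UL and orvals[0] = 1UL << 17; to clear it instead,
 * andvals[0] = ~(1UL << 17) and orvals[0] = 0.
 */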
/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
        struct ec_creg_mask_parms parms;

        memset(&parms.orvals, 0, sizeof(parms.orvals));
        memset(&parms.andvals, 0xff, sizeof(parms.andvals));
        parms.orvals[cr] = 1UL << bit;
        on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
        struct ec_creg_mask_parms parms;

        memset(&parms.orvals, 0, sizeof(parms.orvals));
        memset(&parms.andvals, 0xff, sizeof(parms.andvals));
        parms.andvals[cr] = ~(1UL << bit);
        on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
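
/*
 * Usage sketch (illustrative only): the two wrappers above broadcast a
 * single control register update to every online cpu, e.g.
 *
 *      smp_ctl_set_bit(0, 17);         /- set bit 17 in CR0 everywhere -/
 *      smp_ctl_clear_bit(0, 17);       /- and clear it again -/
 *
 * Callers must be in a context where on_each_cpu() is allowed, i.e. not
 * in a hardware interrupt handler and not with interrupts disabled.
 */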
/*
 * Let's check how many CPUs we have.
 */
static unsigned int __init smp_count_cpus(void)
{
        unsigned int cpu, num_cpus;
        __u16 boot_cpu_addr;

        /*
         * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
         */
        boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
        current_thread_info()->cpu = 0;
        num_cpus = 1;
        for (cpu = 0; cpu <= 65535; cpu++) {
                if ((__u16) cpu == boot_cpu_addr)
                        continue;
                __cpu_logical_map[1] = (__u16) cpu;
                if (signal_processor(1, sigp_sense) == sigp_not_operational)
                        continue;
                num_cpus++;
        }

        printk("Detected %d CPUs\n", (int) num_cpus);
        printk("Boot cpu address %2X\n", boot_cpu_addr);

        return num_cpus;
}
/*
 * Activate a secondary processor.
 */
int __devinit start_secondary(void *cpuvoid)
{
        /* Setup the cpu */
        cpu_init();
        preempt_disable();
        /* Enable TOD clock interrupts on the secondary cpu. */
        init_cpu_timer();
#ifdef CONFIG_VIRT_TIMER
        /* Enable cpu timer interrupts on the secondary cpu. */
        init_cpu_vtimer();
#endif
        /* Enable pfault pseudo page faults on this cpu. */
        pfault_init();

        /* Mark this cpu as online */
        cpu_set(smp_processor_id(), cpu_online_map);
        /* Switch on interrupts */
        local_irq_enable();
        /* Print info about this processor */
        print_cpu_info(&S390_lowcore.cpu_data);
        /* cpu_idle will call schedule for us */
        cpu_idle();
        return 0;
}
static void __init smp_create_idle(unsigned int cpu)
{
        struct task_struct *p;

        /*
         * don't care about the psw and regs settings since we'll never
         * reschedule the forked task.
         */
        p = fork_idle(cpu);
        if (IS_ERR(p))
                panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
        current_set[cpu] = p;
}
/* Reserving and releasing of CPUs */

static DEFINE_SPINLOCK(smp_reserve_lock);
static int smp_cpu_reserved[NR_CPUS];

int
smp_get_cpu(cpumask_t cpu_mask)
{
        unsigned long flags;
        int cpu;

        spin_lock_irqsave(&smp_reserve_lock, flags);
        /* Try to find an already reserved cpu. */
        for_each_cpu_mask(cpu, cpu_mask) {
                if (smp_cpu_reserved[cpu] != 0) {
                        smp_cpu_reserved[cpu]++;
                        /* Found one. */
                        goto out;
                }
        }
        /* Reserve a new cpu from cpu_mask. */
        for_each_cpu_mask(cpu, cpu_mask) {
                if (cpu_online(cpu)) {
                        smp_cpu_reserved[cpu]++;
                        goto out;
                }
        }
        cpu = -ENODEV;
out:
        spin_unlock_irqrestore(&smp_reserve_lock, flags);
        return cpu;
}
void
smp_put_cpu(int cpu)
{
        unsigned long flags;

        spin_lock_irqsave(&smp_reserve_lock, flags);
        smp_cpu_reserved[cpu]--;
        spin_unlock_irqrestore(&smp_reserve_lock, flags);
}

static int
cpu_stopped(int cpu)
{
        __u32 status;

        /* Check for stopped state */
        if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
            sigp_status_stored) {
                if (status & 0x40)
                        return 1;
        }
        return 0;
}
/* Upping and downing of CPUs */

int
__cpu_up(unsigned int cpu)
{
        struct task_struct *idle;
        struct _lowcore *cpu_lowcore;
        struct stack_frame *sf;
        sigp_ccode ccode;
        int curr_cpu;

        for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
                __cpu_logical_map[cpu] = (__u16) curr_cpu;
                if (cpu_stopped(cpu))
                        break;
        }

        if (!cpu_stopped(cpu))
                return -ENODEV;

        ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
                                   cpu, sigp_set_prefix);
        if (ccode) {
                printk("sigp_set_prefix failed for cpu %d "
                       "with condition code %d\n",
                       (int) cpu, (int) ccode);
                return -EIO;
        }

        idle = current_set[cpu];
        cpu_lowcore = lowcore_ptr[cpu];
        cpu_lowcore->kernel_stack = (unsigned long)
                task_stack_page(idle) + (THREAD_SIZE);
        sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
                                     - sizeof(struct pt_regs)
                                     - sizeof(struct stack_frame));
        memset(sf, 0, sizeof(struct stack_frame));
        sf->gprs[9] = (unsigned long) sf;
        cpu_lowcore->save_area[15] = (unsigned long) sf;
        __ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
        asm volatile(
                "       stam    0,15,0(%0)"
                : : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
        cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
        cpu_lowcore->current_task = (unsigned long) idle;
        cpu_lowcore->cpu_data.cpu_nr = cpu;
        eieio();

        while (signal_processor(cpu, sigp_restart) == sigp_busy)
                udelay(10);

        while (!cpu_online(cpu))
                cpu_relax();
        return 0;
}
static unsigned int __initdata additional_cpus;
static unsigned int __initdata possible_cpus;

void __init smp_setup_cpu_possible_map(void)
{
        unsigned int phy_cpus, pos_cpus, cpu;

        phy_cpus = smp_count_cpus();
        pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS);

        if (possible_cpus)
                pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS);

        for (cpu = 0; cpu < pos_cpus; cpu++)
                cpu_set(cpu, cpu_possible_map);

        phy_cpus = min(phy_cpus, pos_cpus);

        for (cpu = 0; cpu < phy_cpus; cpu++)
                cpu_set(cpu, cpu_present_map);
}
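
/*
 * Worked example (illustrative only): with 2 detected cpus,
 * additional_cpus=2 and NR_CPUS=32, smp_count_cpus() yields
 * phy_cpus = 2, so pos_cpus = min(2 + 2, 32) = 4. Cpus 0-3 become
 * possible (and may be hotplugged later), while only the 2 physically
 * present cpus 0-1 are marked present.
 */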
#ifdef CONFIG_HOTPLUG_CPU

static int __init setup_additional_cpus(char *s)
{
        additional_cpus = simple_strtoul(s, NULL, 0);
        return 0;
}
early_param("additional_cpus", setup_additional_cpus);

static int __init setup_possible_cpus(char *s)
{
        possible_cpus = simple_strtoul(s, NULL, 0);
        return 0;
}
early_param("possible_cpus", setup_possible_cpus);
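
/*
 * Usage sketch (illustrative only): both early parameters are given on
 * the kernel command line, e.g.
 *
 *      additional_cpus=2
 *      possible_cpus=4
 *
 * possible_cpus, when set, overrides the detected-plus-additional
 * calculation in smp_setup_cpu_possible_map() above.
 */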
int
__cpu_disable(void)
{
        unsigned long flags;
        struct ec_creg_mask_parms cr_parms;
        int cpu = smp_processor_id();

        spin_lock_irqsave(&smp_reserve_lock, flags);
        if (smp_cpu_reserved[cpu] != 0) {
                spin_unlock_irqrestore(&smp_reserve_lock, flags);
                return -EBUSY;
        }
        cpu_clear(cpu, cpu_online_map);

        /* Disable pfault pseudo page faults on this cpu. */
        pfault_fini();

        memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
        memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));

        /* disable all external interrupts */
        cr_parms.orvals[0] = 0;
        cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 |
                                1<<11 | 1<<10 | 1<< 6 | 1<< 4);
        /* disable all I/O interrupts */
        cr_parms.orvals[6] = 0;
        cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 |
                                1<<27 | 1<<26 | 1<<25 | 1<<24);
        /* disable most machine checks */
        cr_parms.orvals[14] = 0;
        cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24);

        smp_ctl_bit_callback(&cr_parms);

        spin_unlock_irqrestore(&smp_reserve_lock, flags);
        return 0;
}
void
__cpu_die(unsigned int cpu)
{
        /* Wait until target cpu is down */
        while (!smp_cpu_not_running(cpu))
                cpu_relax();
        printk("Processor %d spun down\n", cpu);
}

void
cpu_die(void)
{
        idle_task_exit();
        signal_processor(smp_processor_id(), sigp_stop);
        BUG();
        for (;;);
}

#endif /* CONFIG_HOTPLUG_CPU */
/*
 *      Cycle through the processors and setup structures.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned long stack;
        unsigned int cpu;
        int i;

        /* request the 0x1201 emergency signal external interrupt */
        if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
                panic("Couldn't request external interrupt 0x1201");
        memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
        /*
         *  Initialize prefix pages and stacks for all possible cpus
         */
        print_cpu_info(&S390_lowcore.cpu_data);

        for_each_possible_cpu(i) {
                lowcore_ptr[i] = (struct _lowcore *)
                        __get_free_pages(GFP_KERNEL | GFP_DMA,
                                         sizeof(void*) == 8 ? 1 : 0);
                stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
                if (lowcore_ptr[i] == NULL || stack == 0ULL)
                        panic("smp_boot_cpus failed to allocate memory\n");

                *(lowcore_ptr[i]) = S390_lowcore;
                lowcore_ptr[i]->async_stack = stack + (ASYNC_SIZE);
                stack = __get_free_pages(GFP_KERNEL, 0);
                if (stack == 0ULL)
                        panic("smp_boot_cpus failed to allocate memory\n");
                lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE);
#ifndef CONFIG_64BIT
                if (MACHINE_HAS_IEEE) {
                        lowcore_ptr[i]->extended_save_area_addr =
                                (__u32) __get_free_pages(GFP_KERNEL, 0);
                        if (lowcore_ptr[i]->extended_save_area_addr == 0)
                                panic("smp_boot_cpus failed to "
                                      "allocate memory\n");
                }
#endif
        }
#ifndef CONFIG_64BIT
        if (MACHINE_HAS_IEEE)
                ctl_set_bit(14, 29); /* enable extended save area */
#endif
        set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);

        for_each_possible_cpu(cpu)
                if (cpu != smp_processor_id())
                        smp_create_idle(cpu);
}
void __devinit smp_prepare_boot_cpu(void)
{
        BUG_ON(smp_processor_id() != 0);

        cpu_set(0, cpu_online_map);
        S390_lowcore.percpu_offset = __per_cpu_offset[0];
        current_set[0] = current;
}
void smp_cpus_done(unsigned int max_cpus)
{
        cpu_present_map = cpu_possible_map;
}
/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

static DEFINE_PER_CPU(struct cpu, cpu_devices);
static int __init topology_init(void)
{
        int cpu;
        int ret;

        for_each_possible_cpu(cpu) {
                struct cpu *c = &per_cpu(cpu_devices, cpu);

                ret = register_cpu(c, cpu);
                if (ret)
                        printk(KERN_WARNING "topology_init: register_cpu %d "
                               "failed (%d)\n", cpu, ret);
        }
        return 0;
}
subsys_initcall(topology_init);
EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);
EXPORT_SYMBOL(lowcore_ptr);
EXPORT_SYMBOL(smp_ctl_set_bit);
EXPORT_SYMBOL(smp_ctl_clear_bit);
EXPORT_SYMBOL(smp_get_cpu);
EXPORT_SYMBOL(smp_put_cpu);
);