/*
 * SMP related functions
 *
 * Copyright IBM Corp. 1999, 2012
 * Author(s): Denis Joseph Barrow,
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 * based on other smp stuff by
 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
 * (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers, only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/crash_dump.h>
#include <asm/asm-offsets.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
#include <asm/vtimer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sigp.h>
#include <asm/idle.h>
#include "entry.h"

enum {
	ec_schedule = 0,
	ec_call_function_single,
	ec_stop_cpu,
};

enum {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

static DEFINE_PER_CPU(struct cpu *, cpu_device);

struct pcpu {
	struct _lowcore *lowcore;	/* lowcore page(s) for the cpu */
	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
	signed char state;		/* physical cpu state */
	signed char polarization;	/* physical polarization */
	u16 address;			/* physical cpu address */
};

static u8 boot_cpu_type;
static struct pcpu pcpu_devices[NR_CPUS];

unsigned int smp_cpu_mt_shift;
EXPORT_SYMBOL(smp_cpu_mt_shift);

unsigned int smp_cpu_mtid;
EXPORT_SYMBOL(smp_cpu_mtid);

static unsigned int smp_max_threads __initdata = -1U;

static int __init early_nosmt(char *s)
{
	smp_max_threads = 1;
	return 0;
}
early_param("nosmt", early_nosmt);

static int __init early_smt(char *s)
{
	get_option(&s, &smp_max_threads);
	return 0;
}
early_param("smt", early_smt);

/*
 * The smp_cpu_state_mutex must be held when changing the state or polarization
 * member of a pcpu data structure within the pcpu_devices array.
 */
DEFINE_MUTEX(smp_cpu_state_mutex);
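
/*
 * Usage sketch (illustrative, not part of the original source): any
 * writer of the state or polarization members takes the mutex around
 * the update, e.g.
 *
 *	mutex_lock(&smp_cpu_state_mutex);
 *	pcpu_devices[cpu].state = CPU_STATE_CONFIGURED;
 *	mutex_unlock(&smp_cpu_state_mutex);
 *
 * cpu_configure_store() below follows this pattern.
 */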

/*
 * Signal processor helper functions.
 */
static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm,
				    u32 *status)
{
	int cc;

	while (1) {
		cc = __pcpu_sigp(addr, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			return cc;
		cpu_relax();
	}
}

static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
	int cc, retry;

	for (retry = 0; ; retry++) {
		cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			break;
		if (retry >= 3)
			udelay(10);
	}
	return cc;
}
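
/*
 * Note on the two helpers above (added for clarity, not part of the
 * original source): they only hide the transient SIGP_CC_BUSY condition
 * code; every other condition code is returned to the caller, e.g.
 *
 *	if (pcpu_sigp_retry(pcpu, SIGP_STOP, 0) != SIGP_CC_ORDER_CODE_ACCEPTED)
 *		return -EIO;	(the cpu did not accept the stop order)
 */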

static inline int pcpu_stopped(struct pcpu *pcpu)
{
	u32 uninitialized_var(status);

	if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
			0, &status) != SIGP_CC_STATUS_STORED)
		return 0;
	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}

static inline int pcpu_running(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
			0, NULL) != SIGP_CC_STATUS_STORED)
		return 1;
	/* Status stored condition code is equivalent to cpu not running. */
	return 0;
}

/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
{
	int cpu;

	for_each_cpu(cpu, mask)
		if (pcpu_devices[cpu].address == address)
			return pcpu_devices + cpu;
	return NULL;
}

static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
	int order;

	if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
		return;
	order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
	pcpu_sigp_retry(pcpu, order, 0);
}
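
/*
 * Usage sketch (illustrative, not part of the original source): the
 * ec_xxx bits accumulate in pcpu->ec_mask and are consumed on the
 * target cpu by smp_handle_ext_call(), so a reschedule request is simply
 *
 *	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
 */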

#define ASYNC_FRAME_OFFSET (ASYNC_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
#define PANIC_FRAME_OFFSET (PAGE_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)

static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
	unsigned long async_stack, panic_stack;
	struct _lowcore *lc;

	if (pcpu != &pcpu_devices[0]) {
		pcpu->lowcore =	(struct _lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
		async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		panic_stack = __get_free_page(GFP_KERNEL);
		if (!pcpu->lowcore || !panic_stack || !async_stack)
			goto out;
	} else {
		async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET;
		panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET;
	}
	lc = pcpu->lowcore;
	memcpy(lc, &S390_lowcore, 512);
	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
	lc->async_stack = async_stack + ASYNC_FRAME_OFFSET;
	lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	if (MACHINE_HAS_VX)
		lc->vector_save_area_addr =
			(unsigned long) &lc->vector_save_area;
	if (vdso_alloc_per_cpu(lc))
		goto out;
	lowcore_ptr[cpu] = lc;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
	return 0;
out:
	if (pcpu != &pcpu_devices[0]) {
		free_page(panic_stack);
		free_pages(async_stack, ASYNC_ORDER);
		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
	}
	return -ENOMEM;
}
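
/*
 * Background note (added for clarity, not part of the original source):
 * SIGP_SET_PREFIX points the prefix register of the cpu at the new
 * lowcore, so that the cpu's absolute addresses 0-8191 are relocated to
 * the lowcore page(s) initialized above.
 */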

#ifdef CONFIG_HOTPLUG_CPU

static void pcpu_free_lowcore(struct pcpu *pcpu)
{
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
	lowcore_ptr[pcpu - pcpu_devices] = NULL;
	vdso_free_per_cpu(pcpu->lowcore);
	if (pcpu == &pcpu_devices[0])
		return;
	free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET);
	free_pages(pcpu->lowcore->async_stack-ASYNC_FRAME_OFFSET, ASYNC_ORDER);
	free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
}

#endif /* CONFIG_HOTPLUG_CPU */

static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
	struct _lowcore *lc = pcpu->lowcore;

	if (MACHINE_HAS_TLB_LC)
		cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
	atomic_inc(&init_mm.context.attach_count);
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->percpu_offset = __per_cpu_offset[cpu];
	lc->kernel_asce = S390_lowcore.kernel_asce;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->user_timer = lc->system_timer = lc->steal_timer = 0;
	__ctl_store(lc->cregs_save_area, 0, 15);
	save_access_regs((unsigned int *) lc->access_regs_save_area);
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       MAX_FACILITY_BIT/8);
}

static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
	struct _lowcore *lc = pcpu->lowcore;
	struct thread_info *ti = task_thread_info(tsk);

	lc->kernel_stack = (unsigned long) task_stack_page(tsk)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->thread_info = (unsigned long) task_thread_info(tsk);
	lc->current_task = (unsigned long) tsk;
	lc->user_timer = ti->user_timer;
	lc->system_timer = ti->system_timer;
	lc->steal_timer = 0;
}

static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
	struct _lowcore *lc = pcpu->lowcore;

	lc->restart_stack = lc->kernel_stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = -1UL;
	pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
}

/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
			  void *data, unsigned long stack)
{
	struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
	unsigned long source_cpu = stap();

	__load_psw_mask(PSW_KERNEL_BITS);
	if (pcpu->address == source_cpu)
		func(data);	/* should not return */
	/* Stop target cpu (if func returns this stops the current cpu). */
	pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
	/* Restart func on the target cpu and stop the current cpu. */
	mem_assign_absolute(lc->restart_stack, stack);
	mem_assign_absolute(lc->restart_fn, (unsigned long) func);
	mem_assign_absolute(lc->restart_data, (unsigned long) data);
	mem_assign_absolute(lc->restart_source, source_cpu);
	asm volatile(
		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
		"	brc	2,0b	# busy, try again\n"
		"1:	sigp	0,%1,%3	# sigp stop to current cpu\n"
		"	brc	2,1b	# busy, try again\n"
		: : "d" (pcpu->address), "d" (source_cpu),
		    "K" (SIGP_RESTART), "K" (SIGP_STOP));
	for (;;) ;
}
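
/*
 * Note on pcpu_delegate() (added for clarity, not part of the original
 * source): the final sigp pair is coded as a single asm block so that no
 * further C code has to run on the current cpu once the stop order has
 * been issued; the restart parameters are written with
 * mem_assign_absolute() because the target cpu picks them up through
 * its absolute lowcore.
 */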

/*
 * Enable additional logical cpus for multi-threading.
 */
static int pcpu_set_smt(unsigned int mtid)
{
	register unsigned long reg1 asm ("1") = (unsigned long) mtid;
	int cc;

	if (smp_cpu_mtid == mtid)
		return 0;
	asm volatile(
		"	sigp	%1,0,%2	# sigp set multi-threading\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc) : "d" (reg1), "K" (SIGP_SET_MULTI_THREADING)
		: "cc");
	if (cc == 0) {
		smp_cpu_mtid = mtid;
		smp_cpu_mt_shift = 0;
		while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
			smp_cpu_mt_shift++;
		pcpu_devices[0].address = stap();
	}
	return cc;
}
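
/*
 * Illustrative note (not part of the original source): once
 * multi-threading is enabled, a physical cpu address is the core id
 * shifted by smp_cpu_mt_shift plus the thread id within the core, e.g.
 * with smp_cpu_mtid == 1 (two threads, smp_cpu_mt_shift == 1):
 *
 *	address = (core_id << smp_cpu_mt_shift) + thread_id;
 *
 * This is the layout used by smp_store_cpu_states() and
 * __smp_rescan_cpus() below.
 */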

/*
 * Call function on an online CPU.
 */
void smp_call_online_cpu(void (*func)(void *), void *data)
{
	struct pcpu *pcpu;

	/* Use the current cpu if it is online. */
	pcpu = pcpu_find_address(cpu_online_mask, stap());
	if (!pcpu)
		/* Use the first online cpu. */
		pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
	pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
}

/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
	pcpu_delegate(&pcpu_devices[0], func, data,
		      pcpu_devices->lowcore->panic_stack -
		      PANIC_FRAME_OFFSET + PAGE_SIZE);
}

int smp_find_processor_id(u16 address)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (pcpu_devices[cpu].address == address)
			return cpu;
	return -1;
}
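
/*
 * Usage sketch (illustrative, not part of the original source): this is
 * the translation mentioned in the header comment; callers map a
 * physical cpu address back to a logical cpu number with
 *
 *	int cpu = smp_find_processor_id(address);
 *
 * and get -1 if no present cpu has that address.
 */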

int smp_vcpu_scheduled(int cpu)
{
	return pcpu_running(pcpu_devices + cpu);
}

void smp_yield_cpu(int cpu)
{
	if (MACHINE_HAS_DIAG9C)
		asm volatile("diag %0,0,0x9c"
			     : : "d" (pcpu_devices[cpu].address));
	else if (MACHINE_HAS_DIAG44)
		asm volatile("diag 0,0,0x44");
}
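
/*
 * Note (added for clarity, not part of the original source): diagnose
 * 0x9c is a directed yield of the time slice to the given cpu, diagnose
 * 0x44 is an undirected yield. Both are hints to the hypervisor, so
 * there is nothing to do when neither is available.
 */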

/*
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
static void smp_emergency_stop(cpumask_t *cpumask)
{
	u64 end;
	int cpu;

	end = get_tod_clock() + (1000000UL << 12);
	for_each_cpu(cpu, cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		set_bit(ec_stop_cpu, &pcpu->ec_mask);
		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
				   0, NULL) == SIGP_CC_BUSY &&
		       get_tod_clock() < end)
			cpu_relax();
	}
	while (get_tod_clock() < end) {
		for_each_cpu(cpu, cpumask)
			if (pcpu_stopped(pcpu_devices + cpu))
				cpumask_clear_cpu(cpu, cpumask);
		if (cpumask_empty(cpumask))
			break;
		cpu_relax();
	}
}
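
/*
 * Note on the timeout above (added for clarity, not part of the original
 * source): bit 51 of the TOD clock increments every microsecond, i.e.
 * one microsecond equals 4096 (1 << 12) units of get_tod_clock(), so
 * 1000000UL << 12 is a one second timeout.
 */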

/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
	cpumask_t cpumask;
	int cpu;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	trace_hardirqs_off();

	debug_set_critical();
	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

	if (oops_in_progress)
		smp_emergency_stop(&cpumask);

	/* stop all processors */
	for_each_cpu(cpu, &cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
		while (!pcpu_stopped(pcpu))
			cpu_relax();
	}
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void smp_handle_ext_call(void)
{
	unsigned long bits;

	/* handle bit signal external calls */
	bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
	if (test_bit(ec_stop_cpu, &bits))
		smp_stop_cpu();
	if (test_bit(ec_schedule, &bits))
		scheduler_ipi();
	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
}

static void do_ext_call_interrupt(struct ext_code ext_code,
				  unsigned int param32, unsigned long param64)
{
	inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
	smp_handle_ext_call();
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

void arch_send_call_function_single_ipi(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orval;
	unsigned long andval;
	int cr;
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];

	__ctl_store(cregs, 0, 15);
	cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);
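
/*
 * Usage sketch (illustrative, the register/bit numbers are made up):
 *
 *	smp_ctl_set_bit(0, 13);		set bit 13 of control register 0
 *	smp_ctl_clear_bit(0, 13);	and clear it again, on all cpus
 *
 * Both helpers build an ec_creg_mask_parms on the stack and run
 * smp_ctl_bit_callback() on every online cpu via on_each_cpu(), which
 * waits for all cpus to finish before the parameter area goes away.
 */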

#ifdef CONFIG_CRASH_DUMP

static inline void __smp_store_cpu_state(int cpu, u16 address, int is_boot_cpu)
{
	void *lc = pcpu_devices[0].lowcore;
	struct save_area_ext *sa_ext;
	unsigned long vx_sa;

	sa_ext = dump_save_area_create(cpu);
	if (!sa_ext)
		panic("could not allocate memory for save area\n");
	if (is_boot_cpu) {
		/* Copy the registers of the boot CPU. */
		copy_oldmem_page(1, (void *) &sa_ext->sa, sizeof(sa_ext->sa),
				 SAVE_AREA_BASE - PAGE_SIZE, 0);
		if (MACHINE_HAS_VX)
			save_vx_regs_safe(sa_ext->vx_regs);
		return;
	}
	/* Get the registers of a non-boot cpu. */
	__pcpu_sigp_relax(address, SIGP_STOP_AND_STORE_STATUS, 0, NULL);
	memcpy_real(&sa_ext->sa, lc + SAVE_AREA_BASE, sizeof(sa_ext->sa));
	if (!MACHINE_HAS_VX)
		return;
	/* Get the VX registers */
	vx_sa = __get_free_page(GFP_KERNEL);
	if (!vx_sa)
		panic("could not allocate memory for VX save area\n");
	__pcpu_sigp_relax(address, SIGP_STORE_ADDITIONAL_STATUS, vx_sa, NULL);
	memcpy(sa_ext->vx_regs, (void *) vx_sa, sizeof(sa_ext->vx_regs));
	free_page(vx_sa);
}

/*
 * Collect CPU state of the previous, crashed system.
 * There are four cases:
 * 1) standard zfcp dump
 *    condition: OLDMEM_BASE == NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The boot CPU state is located in
 *    the absolute lowcore of the memory stored in the HSA. The zcore code
 *    will allocate the save area and copy the boot CPU state from the HSA.
 * 2) stand-alone kdump for SCSI (zfcp dump with swapped memory)
 *    condition: OLDMEM_BASE != NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The firmware or the boot-loader
 *    stored the registers of the boot CPU in the absolute lowcore in the
 *    memory of the old system.
 * 3) kdump and the old kernel did not store the CPU state,
 *    or stand-alone kdump for DASD
 *    condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The kexec code or the boot-loader
 *    stored the registers of the boot CPU in the memory of the old system.
 * 4) kdump and the old kernel stored the CPU state
 *    condition: OLDMEM_BASE != NULL && is_kdump_kernel()
 *    The state of all CPUs is stored in ELF sections in the memory of the
 *    old system. The ELF sections are picked up by the crash_dump code
 *    via elfcorehdr_addr.
 */
static void __init smp_store_cpu_states(struct sclp_cpu_info *info)
{
	unsigned int cpu, address, i, j;
	int is_boot_cpu;

	if (is_kdump_kernel())
		/* Previous system stored the CPU states. Nothing to do. */
		return;
	if (!(OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP))
		/* No previous system present, normal boot. */
		return;
	/* Set multi-threading state to the previous system. */
	pcpu_set_smt(sclp_get_mtid_prev());
	/* Collect CPU states. */
	cpu = 0;
	for (i = 0; i < info->configured; i++) {
		/* Skip CPUs with different CPU type. */
		if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
			continue;
		for (j = 0; j <= smp_cpu_mtid; j++, cpu++) {
			address = (info->cpu[i].core_id << smp_cpu_mt_shift) + j;
			is_boot_cpu = (address == pcpu_devices[0].address);
			if (is_boot_cpu && !OLDMEM_BASE)
				/* Skip boot CPU for standard zfcp dump. */
				continue;
			/* Get state for this CPU. */
			__smp_store_cpu_state(cpu, address, is_boot_cpu);
		}
	}
}

int smp_store_status(int cpu)
{
	unsigned long vx_sa;
	struct pcpu *pcpu;

	pcpu = pcpu_devices + cpu;
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STOP_AND_STORE_STATUS,
			      0, NULL) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	if (!MACHINE_HAS_VX)
		return 0;
	vx_sa = __pa(pcpu->lowcore->vector_save_area_addr);
	__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
			  vx_sa, NULL);
	return 0;
}

#endif /* CONFIG_CRASH_DUMP */

void smp_cpu_set_polarization(int cpu, int val)
{
	pcpu_devices[cpu].polarization = val;
}

int smp_cpu_get_polarization(int cpu)
{
	return pcpu_devices[cpu].polarization;
}

static struct sclp_cpu_info *smp_get_cpu_info(void)
{
	static int use_sigp_detection;
	struct sclp_cpu_info *info;
	int address;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info && (use_sigp_detection || sclp_get_cpu_info(info))) {
		use_sigp_detection = 1;
		for (address = 0; address <= MAX_CPU_ADDRESS;
		     address += (1U << smp_cpu_mt_shift)) {
			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0, NULL) ==
			    SIGP_CC_NOT_OPERATIONAL)
				continue;
			info->cpu[info->configured].core_id =
				address >> smp_cpu_mt_shift;
			info->configured++;
		}
		info->combined = info->configured;
	}
	return info;
}

static int smp_add_present_cpu(int cpu);

static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add)
{
	struct pcpu *pcpu;
	cpumask_t avail;
	int cpu, nr, i, j;
	u16 address;

	nr = 0;
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	cpu = cpumask_first(&avail);
	for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
		if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
			continue;
		address = info->cpu[i].core_id << smp_cpu_mt_shift;
		for (j = 0; j <= smp_cpu_mtid; j++) {
			if (pcpu_find_address(cpu_present_mask, address + j))
				continue;
			pcpu = pcpu_devices + cpu;
			pcpu->address = address + j;
			pcpu->state =
				(cpu >= info->configured*(smp_cpu_mtid + 1)) ?
				CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
			set_cpu_present(cpu, true);
			if (sysfs_add && smp_add_present_cpu(cpu) != 0)
				set_cpu_present(cpu, false);
			else
				nr++;
			cpu = cpumask_next(cpu, &avail);
			if (cpu >= nr_cpu_ids)
				break;
		}
	}
	return nr;
}

static void __init smp_detect_cpus(void)
{
	unsigned int cpu, mtid, c_cpus, s_cpus;
	struct sclp_cpu_info *info;
	u16 address;

	/* Get CPU information */
	info = smp_get_cpu_info();
	if (!info)
		panic("smp_detect_cpus failed to allocate memory\n");

	/* Find boot CPU type */
	if (info->has_cpu_type) {
		address = stap();
		for (cpu = 0; cpu < info->combined; cpu++)
			if (info->cpu[cpu].core_id == address) {
				/* The boot cpu dictates the cpu type. */
				boot_cpu_type = info->cpu[cpu].type;
				break;
			}
		if (cpu >= info->combined)
			panic("Could not find boot CPU type");
	}

#ifdef CONFIG_CRASH_DUMP
	/* Collect CPU state of previous system */
	smp_store_cpu_states(info);
#endif

	/* Set multi-threading state for the current system */
	mtid = sclp_get_mtid(boot_cpu_type);
	mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
	pcpu_set_smt(mtid);

	/* Print number of CPUs */
	c_cpus = s_cpus = 0;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type)
			continue;
		if (cpu < info->configured)
			c_cpus += smp_cpu_mtid + 1;
		else
			s_cpus += smp_cpu_mtid + 1;
	}
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);

	/* Add CPUs present at boot */
	get_online_cpus();
	__smp_rescan_cpus(info, 0);
	put_online_cpus();
}

/*
 * Activate a secondary processor.
 */
static void smp_start_secondary(void *cpuvoid)
{
	S390_lowcore.last_update_clock = get_tod_clock();
	S390_lowcore.restart_stack = (unsigned long) restart_stack;
	S390_lowcore.restart_fn = (unsigned long) do_restart;
	S390_lowcore.restart_data = 0;
	S390_lowcore.restart_source = -1UL;
	restore_access_regs(S390_lowcore.access_regs_save_area);
	__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	cpu_init();
	preempt_disable();
	init_cpu_timer();
	vtime_init();
	pfault_init();
	notify_cpu_starting(smp_processor_id());
	set_cpu_online(smp_processor_id(), true);
	inc_irq_stat(CPU_RST);
	local_irq_enable();
	cpu_startup_entry(CPUHP_ONLINE);
}

/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	struct pcpu *pcpu;
	int base, i, rc;

	pcpu = pcpu_devices + cpu;
	if (pcpu->state != CPU_STATE_CONFIGURED)
		return -EIO;
	base = cpu - (cpu % (smp_cpu_mtid + 1));
	for (i = 0; i <= smp_cpu_mtid; i++) {
		if (base + i < nr_cpu_ids)
			if (cpu_online(base + i))
				break;
	}
	/*
	 * If this is the first CPU of the core to get online
	 * do an initial CPU reset.
	 */
	if (i > smp_cpu_mtid &&
	    pcpu_sigp_retry(pcpu_devices + base, SIGP_INITIAL_CPU_RESET, 0) !=
	    SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;

	rc = pcpu_alloc_lowcore(pcpu, cpu);
	if (rc)
		return rc;
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(pcpu, tidle);
	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu) || !cpu_active(cpu))
		cpu_relax();
	return 0;
}

static unsigned int setup_possible_cpus __initdata;

static int __init _setup_possible_cpus(char *s)
{
	get_option(&s, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);

#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
	unsigned long cregs[16];

	/* Handle possible pending IPIs */
	smp_handle_ext_call();
	set_cpu_online(smp_processor_id(), false);
	/* Disable pseudo page faults on this cpu. */
	pfault_fini();
	/* Disable interrupt sources via control register. */
	__ctl_store(cregs, 0, 15);
	cregs[0] &= ~0x0000ee70UL;	/* disable all external interrupts */
	cregs[6] &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14] &= ~0x1f000000UL;	/* disable most machine checks */
	__ctl_load(cregs, 0, 15);
	clear_cpu_flag(CIF_NOHZ_DELAY);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	struct pcpu *pcpu;

	/* Wait until target cpu is down */
	pcpu = pcpu_devices + cpu;
	while (!pcpu_stopped(pcpu))
		cpu_relax();
	pcpu_free_lowcore(pcpu);
	atomic_dec(&init_mm.context.attach_count);
	cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
	if (MACHINE_HAS_TLB_LC)
		cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
}

void __noreturn cpu_die(void)
{
	idle_task_exit();
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
	for (;;) ;
}

#endif /* CONFIG_HOTPLUG_CPU */

void __init smp_fill_possible_mask(void)
{
	unsigned int possible, sclp_max, cpu;

	sclp_max = min(smp_max_threads, sclp_get_mtid_max() + 1);
	sclp_max = sclp_get_max_cpu() * sclp_max ?: nr_cpu_ids;
	possible = setup_possible_cpus ?: nr_cpu_ids;
	possible = min(possible, sclp_max);
	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
}
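
/*
 * Worked example (illustrative numbers, not part of the original
 * source): with sclp_get_max_cpu() == 16, sclp_get_mtid_max() == 1 and
 * smp_max_threads left at its default, sclp_max becomes 16 * 2 = 32;
 * booting with possible_cpus=8 then limits the possible mask to
 * min(8, 32) = 8 cpus.
 */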

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1201");
	/* request the 0x1202 external call external interrupt */
	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1202");
	smp_detect_cpus();
}

void __init smp_prepare_boot_cpu(void)
{
	struct pcpu *pcpu = pcpu_devices;

	pcpu->state = CPU_STATE_CONFIGURED;
	pcpu->address = stap();
	pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
	set_cpu_present(0, true);
	set_cpu_online(0, true);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void __init smp_setup_processor_id(void)
{
	S390_lowcore.cpu_nr = 0;
	S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t cpu_configure_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct pcpu *pcpu;
	int cpu, val, rc, i;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus and cpu 0 */
	cpu = dev->id;
	cpu -= cpu % (smp_cpu_mtid + 1);
	if (cpu == 0)
		goto out;
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_online(cpu + i))
			goto out;
	pcpu = pcpu_devices + cpu;
	rc = 0;
	switch (val) {
	case 0:
		if (pcpu->state != CPU_STATE_CONFIGURED)
			break;
		rc = sclp_cpu_deconfigure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_STANDBY;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	case 1:
		if (pcpu->state != CPU_STATE_STANDBY)
			break;
		rc = sclp_cpu_configure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
#endif /* CONFIG_HOTPLUG_CPU */

static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&dev_attr_configure.attr,
#endif
	&dev_attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static struct attribute *cpu_online_attrs[] = {
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};

static int smp_cpu_notify(struct notifier_block *self, unsigned long action,
			  void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct device *s = &per_cpu(cpu_device, cpu)->dev;
	int err = 0;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
		break;
	case CPU_DEAD:
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
		break;
	}
	return notifier_from_errno(err);
}

static int smp_add_present_cpu(int cpu)
{
	struct device *s;
	struct cpu *c;
	int rc;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;
	per_cpu(cpu_device, cpu) = c;
	s = &c->dev;
	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	if (cpu_online(cpu)) {
		rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
		if (rc)
			goto out_online;
	}
	rc = topology_cpu_init(c);
	if (rc)
		goto out_topology;
	return 0;

out_topology:
	if (cpu_online(cpu))
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
out_online:
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu(c);
#endif
out:
	return rc;
}

#ifdef CONFIG_HOTPLUG_CPU

int __ref smp_rescan_cpus(void)
{
	struct sclp_cpu_info *info;
	int nr;

	info = smp_get_cpu_info();
	if (!info)
		return -ENOMEM;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	nr = __smp_rescan_cpus(info, 1);
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	kfree(info);
	if (nr)
		topology_schedule_update();
	return 0;
}

static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = smp_rescan_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
#endif /* CONFIG_HOTPLUG_CPU */

static int __init s390_smp_init(void)
{
	int cpu, rc = 0;

#ifdef CONFIG_HOTPLUG_CPU
	rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
	if (rc)
		return rc;
#endif
	cpu_notifier_register_begin();
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			goto out;
	}

	__hotcpu_notifier(smp_cpu_notify, 0);

out:
	cpu_notifier_register_done();
	return rc;
}
subsys_initcall(s390_smp_init);