/*
 *  arch/s390/kernel/smp.c
 *
 *    Copyright IBM Corp. 1999, 2009
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 *		 Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (obtained from STAP) are the sigp
 * functions. All other functions use the identity mapping, that is,
 * cpu_number_map[i] == i for every cpu. cpu_number_map is used e.g. to
 * find the idle task belonging to a logical cpu. Every array in the
 * kernel is sorted by the logical cpu number, not the physical one,
 * which avoids the confusion with __cpu_logical_map and cpu_number_map
 * that other architectures have.
 */
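
/*
 * Illustrative sketch (not from the original source): the mapping is
 * established for the boot cpu in smp_setup_processor_id() below, i.e.
 *
 *	__cpu_logical_map[0] = stap();
 *
 * so a sigp to logical cpu N is always issued against the physical
 * address __cpu_logical_map[N].
 */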
#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/timex.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/crash_dump.h>
#include <asm/asm-offsets.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/cputime.h>
#include <asm/vdso.h>
#include <asm/cpu.h>
#include "entry.h"
/* logical cpu to cpu address */
unsigned short __cpu_logical_map[NR_CPUS];

static struct task_struct *current_set[NR_CPUS];

static u8 smp_cpu_type;
static int smp_use_sigp_detection;

enum s390_cpu_state {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

DEFINE_MUTEX(smp_cpu_state_mutex);
int smp_cpu_polarization[NR_CPUS];
static int smp_cpu_state[NR_CPUS];
static int cpu_management;

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static void smp_ext_bitcall(int, int);
static int raw_cpu_stopped(int cpu)
{
	u32 status;

	switch (raw_sigp_ps(&status, 0, cpu, sigp_sense)) {
	case sigp_status_stored:
		/* Check for stopped and check stop state */
		if (status & 0x50)
			return 1;
		break;
	default:
		break;
	}
	return 0;
}
static inline int cpu_stopped(int cpu)
{
	return raw_cpu_stopped(cpu_logical_map(cpu));
}
/*
 * Ensure that PSW restart is done on an online CPU
 */
void smp_restart_with_online_cpu(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (stap() == __cpu_logical_map[cpu]) {
			/* We are online: Enable DAT again and return */
			__load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
			return;
		}
	}
	/* We are not online: Do PSW restart on an online CPU */
	while (sigp(cpu, sigp_restart) == sigp_busy)
		cpu_relax();
	/* And stop ourself */
	while (raw_sigp(stap(), sigp_stop) == sigp_busy)
		cpu_relax();
}
void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
{
	struct _lowcore *lc, *current_lc;
	struct stack_frame *sf;
	struct pt_regs *regs;
	unsigned long sp;

	if (smp_processor_id() == 0)
		func(data);
	__load_psw_mask(PSW_BASE_BITS | PSW_DEFAULT_KEY);
	/* Disable lowcore protection */
	__ctl_clear_bit(0, 28);
	current_lc = lowcore_ptr[smp_processor_id()];
	lc = lowcore_ptr[0];
	if (!lc)
		lc = current_lc;
	lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
	lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) smp_restart_cpu;
	if (!cpu_online(0))
		smp_switch_to_cpu(func, data, 0, stap(), __cpu_logical_map[0]);
	while (sigp(0, sigp_stop_and_store_status) == sigp_busy)
		cpu_relax();
	sp = lc->panic_stack;
	sp -= sizeof(struct pt_regs);
	regs = (struct pt_regs *) sp;
	memcpy(&regs->gprs, &current_lc->gpregs_save_area, sizeof(regs->gprs));
	regs->psw = lc->psw_save_area;
	sp -= STACK_FRAME_OVERHEAD;
	sf = (struct stack_frame *) sp;
	sf->back_chain = regs->gprs[15];
	smp_switch_to_cpu(func, data, sp, stap(), __cpu_logical_map[0]);
}
void smp_send_stop(void)
{
	int cpu, rc;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
	trace_hardirqs_off();

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = sigp(cpu, sigp_stop);
		} while (rc == sigp_busy);

		while (!cpu_stopped(cpu))
			cpu_relax();
	}
}
/*
 * This is the main routine where commands issued by other
 * cpus should be handled.
 */
static void do_ext_call_interrupt(unsigned int ext_int_code,
				  unsigned int param32, unsigned long param64)
{
	unsigned long bits;

	kstat_cpu(smp_processor_id()).irqs[EXTINT_IPI]++;
	/*
	 * handle bit signal external calls
	 */
	bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_schedule, &bits))
		scheduler_ipi();

	if (test_bit(ec_call_function, &bits))
		generic_smp_call_function_interrupt();

	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
}
/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, int sig)
{
	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
	while (sigp(cpu, sigp_emergency_signal) == sigp_busy)
		udelay(10);
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		smp_ext_bitcall(cpu, ec_call_function);
}
void arch_send_call_function_single_ipi(int cpu)
{
	smp_ext_bitcall(cpu, ec_call_function_single);
}
#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
static void smp_ptlb_callback(void *info)
{
	__tlb_flush_local();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */
/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	smp_ext_bitcall(cpu, ec_schedule);
}
/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orvals[16];
	unsigned long andvals[16];
};
/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];
	int i;

	__ctl_store(cregs, 0, 15);
	for (i = 0; i <= 15; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
	__ctl_load(cregs, 0, 15);
}
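
/*
 * Worked example (illustrative): to set bit position 7 of control
 * register 0 on every cpu, a caller passes orvals[0] = 1UL << 7 and
 * andvals[0] = ~0UL, so each cpu computes
 *
 *	cregs[0] = (cregs[0] & ~0UL) | (1UL << 7);
 *
 * leaving all other bits and all other control registers unchanged.
 */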
/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.orvals[cr] = 1UL << bit;
	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);
/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.andvals[cr] = ~(1UL << bit);
	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);
#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP)

static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
{
	if (ipl_info.type != IPL_TYPE_FCP_DUMP && !OLDMEM_BASE)
		return;
	if (is_kdump_kernel())
		return;
	if (cpu >= NR_CPUS) {
		pr_warning("CPU %i exceeds the maximum %i and is excluded from "
			   "the dump\n", cpu, NR_CPUS - 1);
		return;
	}
	zfcpdump_save_areas[cpu] = kmalloc(sizeof(struct save_area), GFP_KERNEL);
	while (raw_sigp(phy_cpu, sigp_stop_and_store_status) == sigp_busy)
		cpu_relax();
	memcpy_real(zfcpdump_save_areas[cpu],
		    (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
		    sizeof(struct save_area));
}

struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
EXPORT_SYMBOL_GPL(zfcpdump_save_areas);

#else

static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }

#endif /* CONFIG_ZFCPDUMP */
static int cpu_known(int cpu_id)
{
	int cpu;

	for_each_present_cpu(cpu) {
		if (__cpu_logical_map[cpu] == cpu_id)
			return 1;
	}
	return 0;
}
static int smp_rescan_cpus_sigp(cpumask_t avail)
{
	int cpu_id, logical_cpu;

	logical_cpu = cpumask_first(&avail);
	if (logical_cpu >= nr_cpu_ids)
		return 0;
	for (cpu_id = 0; cpu_id <= MAX_CPU_ADDRESS; cpu_id++) {
		if (cpu_known(cpu_id))
			continue;
		__cpu_logical_map[logical_cpu] = cpu_id;
		smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
		if (!cpu_stopped(logical_cpu))
			continue;
		set_cpu_present(logical_cpu, true);
		smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
		logical_cpu = cpumask_next(logical_cpu, &avail);
		if (logical_cpu >= nr_cpu_ids)
			break;
	}
	return 0;
}
static int smp_rescan_cpus_sclp(cpumask_t avail)
{
	struct sclp_cpu_info *info;
	int cpu_id, logical_cpu, cpu;
	int rc;

	logical_cpu = cpumask_first(&avail);
	if (logical_cpu >= nr_cpu_ids)
		return 0;
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	rc = sclp_get_cpu_info(info);
	if (rc)
		goto out;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
			continue;
		cpu_id = info->cpu[cpu].address;
		if (cpu_known(cpu_id))
			continue;
		__cpu_logical_map[logical_cpu] = cpu_id;
		smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
		set_cpu_present(logical_cpu, true);
		if (cpu >= info->configured)
			smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
		else
			smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
		logical_cpu = cpumask_next(logical_cpu, &avail);
		if (logical_cpu >= nr_cpu_ids)
			break;
	}
out:
	kfree(info);
	return rc;
}
static int __smp_rescan_cpus(void)
{
	cpumask_t avail;

	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	if (smp_use_sigp_detection)
		return smp_rescan_cpus_sigp(avail);
	else
		return smp_rescan_cpus_sclp(avail);
}
static void __init smp_detect_cpus(void)
{
	unsigned int cpu, c_cpus, s_cpus;
	struct sclp_cpu_info *info;
	u16 boot_cpu_addr, cpu_addr;

	c_cpus = 1;
	s_cpus = 0;
	boot_cpu_addr = __cpu_logical_map[0];
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		panic("smp_detect_cpus failed to allocate memory\n");
#ifdef CONFIG_CRASH_DUMP
	if (OLDMEM_BASE && !is_kdump_kernel()) {
		struct save_area *save_area;

		save_area = kmalloc(sizeof(*save_area), GFP_KERNEL);
		if (!save_area)
			panic("could not allocate memory for save area\n");
		copy_oldmem_page(1, (void *) save_area, sizeof(*save_area),
				 SAVE_AREA_BASE - PAGE_SIZE, 0);
		zfcpdump_save_areas[0] = save_area;
	}
#endif
	/* Use sigp detection algorithm if sclp doesn't work. */
	if (sclp_get_cpu_info(info)) {
		smp_use_sigp_detection = 1;
		for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) {
			if (cpu == boot_cpu_addr)
				continue;
			if (!raw_cpu_stopped(cpu))
				continue;
			smp_get_save_area(c_cpus, cpu);
			c_cpus++;
		}
		goto out;
	}

	if (info->has_cpu_type) {
		for (cpu = 0; cpu < info->combined; cpu++) {
			if (info->cpu[cpu].address == boot_cpu_addr) {
				smp_cpu_type = info->cpu[cpu].type;
				break;
			}
		}
	}

	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
			continue;
		cpu_addr = info->cpu[cpu].address;
		if (cpu_addr == boot_cpu_addr)
			continue;
		if (!raw_cpu_stopped(cpu_addr)) {
			s_cpus++;
			continue;
		}
		smp_get_save_area(c_cpus, cpu_addr);
		c_cpus++;
	}
out:
	kfree(info);
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
	get_online_cpus();
	__smp_rescan_cpus();
	put_online_cpus();
}
/*
 * Activate a secondary processor.
 */
int __cpuinit start_secondary(void *cpuvoid)
{
	cpu_init();
	preempt_disable();
	init_cpu_timer();
	init_cpu_vtimer();
	pfault_init();

	notify_cpu_starting(smp_processor_id());
	ipi_call_lock();
	set_cpu_online(smp_processor_id(), true);
	ipi_call_unlock();
	__ctl_clear_bit(0, 28); /* Disable lowcore protection */
	S390_lowcore.restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
	S390_lowcore.restart_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
	__ctl_set_bit(0, 28); /* Enable lowcore protection */
	/*
	 * Wait until the cpu which brought this one up marked it
	 * active before enabling interrupts.
	 */
	while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask))
		cpu_relax();
	local_irq_enable();
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}
struct create_idle {
	struct work_struct work;
	struct task_struct *idle;
	struct completion done;
	int cpu;
};

static void __cpuinit smp_fork_idle(struct work_struct *work)
{
	struct create_idle *c_idle;

	c_idle = container_of(work, struct create_idle, work);
	c_idle->idle = fork_idle(c_idle->cpu);
	complete(&c_idle->done);
}
static int __cpuinit smp_alloc_lowcore(int cpu)
{
	unsigned long async_stack, panic_stack;
	struct _lowcore *lowcore;

	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
	if (!lowcore)
		return -ENOMEM;
	async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
	panic_stack = __get_free_page(GFP_KERNEL);
	if (!panic_stack || !async_stack)
		goto out;
	memcpy(lowcore, &S390_lowcore, 512);
	memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512);
	lowcore->async_stack = async_stack + ASYNC_SIZE;
	lowcore->panic_stack = panic_stack + PAGE_SIZE;
	lowcore->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
	lowcore->restart_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
	if (user_mode != HOME_SPACE_MODE)
		lowcore->restart_psw.mask |= PSW_ASC_HOME;
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		unsigned long save_area;

		save_area = get_zeroed_page(GFP_KERNEL);
		if (!save_area)
			goto out;
		lowcore->extended_save_area_addr = (u32) save_area;
	}
#else
	if (vdso_alloc_per_cpu(cpu, lowcore))
		goto out;
#endif
	lowcore_ptr[cpu] = lowcore;
	return 0;

out:
	free_page(panic_stack);
	free_pages(async_stack, ASYNC_ORDER);
	free_pages((unsigned long) lowcore, LC_ORDER);
	return -ENOMEM;
}
static void smp_free_lowcore(int cpu)
{
	struct _lowcore *lowcore;

	lowcore = lowcore_ptr[cpu];
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		free_page((unsigned long) lowcore->extended_save_area_addr);
#else
	vdso_free_per_cpu(cpu, lowcore);
#endif
	free_page(lowcore->panic_stack - PAGE_SIZE);
	free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER);
	free_pages((unsigned long) lowcore, LC_ORDER);
	lowcore_ptr[cpu] = NULL;
}
/* Upping and downing of CPUs */
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct _lowcore *cpu_lowcore;
	struct create_idle c_idle;
	struct task_struct *idle;
	struct stack_frame *sf;
	u32 lowcore;
	int ccode;

	if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
		return -EIO;
	idle = current_set[cpu];
	if (!idle) {
		c_idle.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done);
		INIT_WORK_ONSTACK(&c_idle.work, smp_fork_idle);
		c_idle.cpu = cpu;
		schedule_work(&c_idle.work);
		wait_for_completion(&c_idle.done);
		if (IS_ERR(c_idle.idle))
			return PTR_ERR(c_idle.idle);
		idle = c_idle.idle;
		current_set[cpu] = c_idle.idle;
	}
	init_idle(idle, cpu);
	if (smp_alloc_lowcore(cpu))
		return -ENOMEM;
	do {
		ccode = sigp(cpu, sigp_initial_cpu_reset);
		if (ccode == sigp_busy)
			udelay(10);
		if (ccode == sigp_not_operational)
			goto err_out;
	} while (ccode == sigp_busy);

	lowcore = (u32)(unsigned long)lowcore_ptr[cpu];
	while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy)
		udelay(10);

	cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->kernel_stack = (unsigned long)
		task_stack_page(idle) + THREAD_SIZE;
	cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle);
	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
				     - sizeof(struct pt_regs)
				     - sizeof(struct stack_frame));
	memset(sf, 0, sizeof(struct stack_frame));
	sf->gprs[9] = (unsigned long) sf;
	cpu_lowcore->save_area[15] = (unsigned long) sf;
	__ctl_store(cpu_lowcore->cregs_save_area, 0, 15);
	atomic_inc(&init_mm.context.attach_count);
	asm volatile(
		"	stam	0,15,0(%0)"
		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
	cpu_lowcore->current_task = (unsigned long) idle;
	cpu_lowcore->cpu_nr = cpu;
	cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce;
	cpu_lowcore->machine_flags = S390_lowcore.machine_flags;
	cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func;
	memcpy(cpu_lowcore->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       MAX_FACILITY_BIT/8);
	eieio();

	while (sigp(cpu, sigp_restart) == sigp_busy)
		udelay(10);

	while (!cpu_online(cpu))
		cpu_relax();
	return 0;

err_out:
	smp_free_lowcore(cpu);
	return -EIO;
}
static int __init setup_possible_cpus(char *s)
{
	int pcpus, cpu;

	pcpus = simple_strtoul(s, NULL, 0);
	init_cpu_possible(cpumask_of(0));
	for (cpu = 1; cpu < pcpus && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);
#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
	struct ec_creg_mask_parms cr_parms;
	int cpu = smp_processor_id();

	set_cpu_online(cpu, false);

	/* Disable pfault pseudo page faults on this cpu. */
	pfault_fini();

	memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
	memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));

	/* disable all external interrupts */
	cr_parms.orvals[0] = 0;
	cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 11 |
				1 << 10 | 1 <<  9 | 1 <<  6 | 1 <<  5 |
				1 <<  4);
	/* disable all I/O interrupts */
	cr_parms.orvals[6] = 0;
	cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
				1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
	/* disable most machine checks */
	cr_parms.orvals[14] = 0;
	cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
				 1 << 25 | 1 << 24);

	smp_ctl_bit_callback(&cr_parms);

	return 0;
}
void __cpu_die(unsigned int cpu)
{
	/* Wait until target cpu is down */
	while (!cpu_stopped(cpu))
		cpu_relax();
	while (sigp_p(0, cpu, sigp_set_prefix) == sigp_busy)
		udelay(10);
	smp_free_lowcore(cpu);
	atomic_dec(&init_mm.context.attach_count);
}

void __noreturn cpu_die(void)
{
	idle_task_exit();
	while (sigp(smp_processor_id(), sigp_stop) == sigp_busy)
		cpu_relax();
	for (;;);
}

#endif /* CONFIG_HOTPLUG_CPU */
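
/*
 * Example (illustrative): the hotplug path above is driven through the
 * generic sysfs interface. Writing
 *
 *	echo 0 > /sys/devices/system/cpu/cpu1/online
 *
 * runs __cpu_disable() on the outgoing cpu, __cpu_die() on a surviving
 * cpu, and the outgoing cpu finally parks itself in cpu_die().
 */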
void __init smp_prepare_cpus(unsigned int max_cpus)
{
#ifndef CONFIG_64BIT
	unsigned long save_area = 0;
#endif
	unsigned long async_stack, panic_stack;
	struct _lowcore *lowcore;

	smp_detect_cpus();

	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");

	/* Reallocate current lowcore, but keep its contents. */
	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
	panic_stack = __get_free_page(GFP_KERNEL);
	async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
	BUG_ON(!lowcore || !panic_stack || !async_stack);
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		save_area = get_zeroed_page(GFP_KERNEL);
#endif
	local_irq_disable();
	local_mcck_disable();
	lowcore_ptr[smp_processor_id()] = lowcore;
	*lowcore = S390_lowcore;
	lowcore->panic_stack = panic_stack + PAGE_SIZE;
	lowcore->async_stack = async_stack + ASYNC_SIZE;
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		lowcore->extended_save_area_addr = (u32) save_area;
#endif
	set_prefix((u32)(unsigned long) lowcore);
	local_mcck_enable();
	local_irq_enable();
#ifdef CONFIG_64BIT
	if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore))
		BUG();
#endif
}
void __init smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);

	current_thread_info()->cpu = 0;
	set_cpu_present(0, true);
	set_cpu_online(0, true);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	current_set[0] = current;
	smp_cpu_state[0] = CPU_STATE_CONFIGURED;
	smp_cpu_polarization[0] = POLARIZATION_UNKNWN;
}
void __init smp_cpus_done(unsigned int max_cpus)
{
}
void __init smp_setup_processor_id(void)
{
	S390_lowcore.cpu_nr = 0;
	__cpu_logical_map[0] = stap();
}
/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct sys_device *dev,
				  struct sysdev_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t cpu_configure_store(struct sys_device *dev,
				   struct sysdev_attribute *attr,
				   const char *buf, size_t count)
{
	int cpu = dev->id;
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;

	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus and cpu 0 */
	if (cpu_online(cpu) || cpu == 0)
		goto out;
	rc = 0;
	switch (val) {
	case 0:
		if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
			rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
			if (!rc) {
				smp_cpu_state[cpu] = CPU_STATE_STANDBY;
				smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
			}
		}
		break;
	case 1:
		if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
			rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
			if (!rc) {
				smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
				smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
			}
		}
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
#endif /* CONFIG_HOTPLUG_CPU */
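
/*
 * Example (illustrative): with CONFIG_HOTPLUG_CPU a standby cpu can be
 * configured from user space before it is brought online:
 *
 *	echo 1 > /sys/devices/system/cpu/cpu2/configure
 *	echo 1 > /sys/devices/system/cpu/cpu2/online
 *
 * The first write is handled by sclp_cpu_configure(), the second one
 * ends up in __cpu_up().
 */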
static ssize_t cpu_polarization_show(struct sys_device *dev,
				     struct sysdev_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	switch (smp_cpu_polarization[cpu]) {
	case POLARIZATION_HRZ:
		count = sprintf(buf, "horizontal\n");
		break;
	case POLARIZATION_VL:
		count = sprintf(buf, "vertical:low\n");
		break;
	case POLARIZATION_VM:
		count = sprintf(buf, "vertical:medium\n");
		break;
	case POLARIZATION_VH:
		count = sprintf(buf, "vertical:high\n");
		break;
	default:
		count = sprintf(buf, "unknown\n");
		break;
	}
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL);
static ssize_t show_cpu_address(struct sys_device *dev,
				struct sysdev_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
}
static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL);
static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&attr_configure.attr,
#endif
	&attr_address.attr,
	&attr_polarization.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};
static ssize_t show_capability(struct sys_device *dev,
			       struct sysdev_attribute *attr, char *buf)
{
	unsigned int capability;
	int rc;

	rc = get_cpu_capability(&capability);
	if (rc)
		return rc;
	return sprintf(buf, "%u\n", capability);
}
static SYSDEV_ATTR(capability, 0444, show_capability, NULL);
static ssize_t show_idle_count(struct sys_device *dev,
			       struct sysdev_attribute *attr, char *buf)
{
	struct s390_idle_data *idle;
	unsigned long long idle_count;
	unsigned int sequence;

	idle = &per_cpu(s390_idle, dev->id);
repeat:
	sequence = idle->sequence;
	smp_rmb();
	if (sequence & 1)
		goto repeat;
	idle_count = idle->idle_count;
	if (idle->idle_enter)
		idle_count++;
	smp_rmb();
	if (idle->sequence != sequence)
		goto repeat;
	return sprintf(buf, "%llu\n", idle_count);
}
static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL);
static ssize_t show_idle_time(struct sys_device *dev,
			      struct sysdev_attribute *attr, char *buf)
{
	struct s390_idle_data *idle;
	unsigned long long now, idle_time, idle_enter;
	unsigned int sequence;

	idle = &per_cpu(s390_idle, dev->id);
	now = get_clock();
repeat:
	sequence = idle->sequence;
	smp_rmb();
	if (sequence & 1)
		goto repeat;
	idle_time = idle->idle_time;
	idle_enter = idle->idle_enter;
	if (idle_enter != 0ULL && idle_enter < now)
		idle_time += now - idle_enter;
	smp_rmb();
	if (idle->sequence != sequence)
		goto repeat;
	return sprintf(buf, "%llu\n", idle_time >> 12);
}
static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
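
/*
 * Note (illustrative): the retry loops above implement a lockless
 * seqcount-style read: idle->sequence is incremented before and after
 * each update, so an odd or changed value means a concurrent update and
 * forces a retry. idle_time is kept in TOD clock units; 4096 TOD units
 * equal one microsecond, hence the ">> 12" for idle_time_us.
 */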
static struct attribute *cpu_online_attrs[] = {
	&attr_capability.attr,
	&attr_idle_count.attr,
	&attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};
static int __cpuinit smp_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	struct s390_idle_data *idle;
	int err = 0;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		idle = &per_cpu(s390_idle, cpu);
		memset(idle, 0, sizeof(struct s390_idle_data));
		err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block __cpuinitdata smp_cpu_nb = {
	.notifier_call = smp_cpu_notify,
};
static int __devinit smp_add_present_cpu(int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	int rc;

	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	if (!cpu_online(cpu))
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
	if (!rc)
		return 0;
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu(c);
#endif
out:
	return rc;
}
#ifdef CONFIG_HOTPLUG_CPU

int __ref smp_rescan_cpus(void)
{
	cpumask_t newcpus;
	int cpu;
	int rc;

	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	cpumask_copy(&newcpus, cpu_present_mask);
	rc = __smp_rescan_cpus();
	if (rc)
		goto out;
	cpumask_andnot(&newcpus, cpu_present_mask, &newcpus);
	for_each_cpu(cpu, &newcpus) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			set_cpu_present(cpu, false);
	}
	rc = 0;
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	if (!cpumask_empty(&newcpus))
		topology_schedule_update();
	return rc;
}
static ssize_t __ref rescan_store(struct sysdev_class *class,
				  struct sysdev_class_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = smp_rescan_cpus();
	return rc ? rc : count;
}
static SYSDEV_CLASS_ATTR(rescan, 0200, NULL, rescan_store);
#endif /* CONFIG_HOTPLUG_CPU */
static ssize_t dispatching_show(struct sysdev_class *class,
				struct sysdev_class_attribute *attr,
				char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", cpu_management);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static ssize_t dispatching_store(struct sysdev_class *dev,
				 struct sysdev_class_attribute *attr,
				 const char *buf,
				 size_t count)
{
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	rc = 0;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	if (cpu_management == val)
		goto out;
	rc = topology_set_cpu_management(val);
	if (!rc)
		cpu_management = val;
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show,
			 dispatching_store);
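
/*
 * Example (illustrative): dispatching is a class-wide attribute, not a
 * per-cpu one. Writing
 *
 *	echo 1 > /sys/devices/system/cpu/dispatching
 *
 * requests vertical cpu polarization via topology_set_cpu_management();
 * writing 0 switches back to horizontal polarization.
 */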
static int __init topology_init(void)
{
	int cpu;
	int rc;

	register_cpu_notifier(&smp_cpu_nb);

#ifdef CONFIG_HOTPLUG_CPU
	rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_rescan);
	if (rc)
		return rc;
#endif
	rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_dispatching);
	if (rc)
		return rc;
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			return rc;
	}
	return 0;
}
subsys_initcall(topology_init);