arch/s390/kernel/smp.c
/*
 *  SMP related functions
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Denis Joseph Barrow,
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers; only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/crash_dump.h>
#include <asm/asm-offsets.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
#include <asm/vtimer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sigp.h>
#include "entry.h"

enum {
	ec_schedule = 0,
	ec_call_function_single,
	ec_stop_cpu,
};

enum {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

struct pcpu {
	struct cpu cpu;
	struct _lowcore *lowcore;	/* lowcore page(s) for the cpu */
	unsigned long async_stack;	/* async stack for the cpu */
	unsigned long panic_stack;	/* panic stack for the cpu */
	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
	int state;			/* physical cpu state */
	int polarization;		/* physical polarization */
	u16 address;			/* physical cpu address */
};

static u8 boot_cpu_type;
static u16 boot_cpu_address;
static struct pcpu pcpu_devices[NR_CPUS];

/*
 * The smp_cpu_state_mutex must be held when changing the state or polarization
 * member of a pcpu data structure within the pcpu_devices array.
 */
DEFINE_MUTEX(smp_cpu_state_mutex);

/*
 * Signal processor helper functions.
 */
static inline int __pcpu_sigp(u16 addr, u8 order, u32 parm, u32 *status)
{
	register unsigned int reg1 asm ("1") = parm;
	int cc;

	asm volatile(
		"	sigp	%1,%2,0(%3)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc), "+d" (reg1) : "d" (addr), "a" (order) : "cc");
	if (status && cc == 1)
		*status = reg1;
	return cc;
}
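
/*
 * Note on the inline assembly above: SIGP sets a condition code in the
 * PSW; "ipm" inserts the program mask (including the two cc bits) into
 * the high byte of %0 and "srl %0,28" shifts the cc down to the value
 * range 0-3. The architected meanings are 0 (order accepted),
 * 1 (status stored, returned in reg1), 2 (busy) and 3 (not
 * operational), matching the SIGP_CC_* constants from <asm/sigp.h>.
 */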

static inline int __pcpu_sigp_relax(u16 addr, u8 order, u32 parm, u32 *status)
{
	int cc;

	while (1) {
		cc = __pcpu_sigp(addr, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			return cc;
		cpu_relax();
	}
}

static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
	int cc, retry;

	for (retry = 0; ; retry++) {
		cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			break;
		if (retry >= 3)
			udelay(10);
	}
	return cc;
}

static inline int pcpu_stopped(struct pcpu *pcpu)
{
	u32 uninitialized_var(status);

	if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
			0, &status) != SIGP_CC_STATUS_STORED)
		return 0;
	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}

static inline int pcpu_running(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
			0, NULL) != SIGP_CC_STATUS_STORED)
		return 1;
	/* Status stored condition code is equivalent to cpu not running. */
	return 0;
}

/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, int address)
{
	int cpu;

	for_each_cpu(cpu, mask)
		if (pcpu_devices[cpu].address == address)
			return pcpu_devices + cpu;
	return NULL;
}

static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
	int order;

	set_bit(ec_bit, &pcpu->ec_mask);
	order = pcpu_running(pcpu) ?
		SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
	pcpu_sigp_retry(pcpu, order, 0);
}
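
/*
 * The ec_mask bit records which ec_xxx action the target should run
 * once its external interrupt handler reaches smp_handle_ext_call().
 * Whether the kick is delivered as an external call (0x1202) or an
 * emergency signal (0x1201) depends on whether the target is currently
 * running; both orders end up in do_ext_call_interrupt(), which only
 * uses the order code to pick the right irq statistics counter.
 */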

static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
	struct _lowcore *lc;

	if (pcpu != &pcpu_devices[0]) {
		pcpu->lowcore = (struct _lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
		pcpu->async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		pcpu->panic_stack = __get_free_page(GFP_KERNEL);
		if (!pcpu->lowcore || !pcpu->panic_stack || !pcpu->async_stack)
			goto out;
	}
	lc = pcpu->lowcore;
	memcpy(lc, &S390_lowcore, 512);
	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
	lc->async_stack = pcpu->async_stack + ASYNC_SIZE
		- STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->panic_stack = pcpu->panic_stack + PAGE_SIZE
		- STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->cpu_nr = cpu;
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL);
		if (!lc->extended_save_area_addr)
			goto out;
	}
#else
	if (vdso_alloc_per_cpu(lc))
		goto out;
#endif
	lowcore_ptr[cpu] = lc;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
	return 0;
out:
	if (pcpu != &pcpu_devices[0]) {
		free_page(pcpu->panic_stack);
		free_pages(pcpu->async_stack, ASYNC_ORDER);
		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
	}
	return -ENOMEM;
}
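
/*
 * Background: each s390 cpu has a prefix register that relocates real
 * addresses 0-8191 to its private lowcore, where the hardware stores
 * interruption parameters and old PSWs. SIGP_SET_PREFIX points the
 * target cpu at the lowcore allocated above; GFP_DMA is used because
 * the prefix value must designate memory below 2 GB.
 */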

#ifdef CONFIG_HOTPLUG_CPU

static void pcpu_free_lowcore(struct pcpu *pcpu)
{
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
	lowcore_ptr[pcpu - pcpu_devices] = NULL;
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		struct _lowcore *lc = pcpu->lowcore;

		free_page((unsigned long) lc->extended_save_area_addr);
		lc->extended_save_area_addr = 0;
	}
#else
	vdso_free_per_cpu(pcpu->lowcore);
#endif
	if (pcpu != &pcpu_devices[0]) {
		free_page(pcpu->panic_stack);
		free_pages(pcpu->async_stack, ASYNC_ORDER);
		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
	}
}

#endif /* CONFIG_HOTPLUG_CPU */

static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
	struct _lowcore *lc = pcpu->lowcore;

	atomic_inc(&init_mm.context.attach_count);
	lc->cpu_nr = cpu;
	lc->percpu_offset = __per_cpu_offset[cpu];
	lc->kernel_asce = S390_lowcore.kernel_asce;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->ftrace_func = S390_lowcore.ftrace_func;
	lc->user_timer = lc->system_timer = lc->steal_timer = 0;
	__ctl_store(lc->cregs_save_area, 0, 15);
	save_access_regs((unsigned int *) lc->access_regs_save_area);
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       MAX_FACILITY_BIT/8);
}

static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
	struct _lowcore *lc = pcpu->lowcore;
	struct thread_info *ti = task_thread_info(tsk);

	lc->kernel_stack = (unsigned long) task_stack_page(tsk)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->thread_info = (unsigned long) task_thread_info(tsk);
	lc->current_task = (unsigned long) tsk;
	lc->user_timer = ti->user_timer;
	lc->system_timer = ti->system_timer;
	lc->steal_timer = 0;
}

static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
	struct _lowcore *lc = pcpu->lowcore;

	lc->restart_stack = lc->kernel_stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = -1UL;
	pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
}
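
/*
 * SIGP_RESTART makes the target cpu load the restart new PSW from its
 * lowcore; the low-level restart handler then switches to restart_stack
 * and calls restart_fn with restart_data as argument. restart_source is
 * set to -1UL here to mark that there is no source cpu to synchronize
 * with; pcpu_delegate() below stores a real cpu address instead.
 */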

/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
			  void *data, unsigned long stack)
{
	struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
	unsigned long source_cpu = stap();

	__load_psw_mask(psw_kernel_bits);
	if (pcpu->address == source_cpu)
		func(data);	/* should not return */
	/* Stop target cpu (if func returns this stops the current cpu). */
	pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
	/* Restart func on the target cpu and stop the current cpu. */
	mem_assign_absolute(lc->restart_stack, stack);
	mem_assign_absolute(lc->restart_fn, (unsigned long) func);
	mem_assign_absolute(lc->restart_data, (unsigned long) data);
	mem_assign_absolute(lc->restart_source, source_cpu);
	asm volatile(
		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
		"	brc	2,0b	# busy, try again\n"
		"1:	sigp	0,%1,%3	# sigp stop to current cpu\n"
		"	brc	2,1b	# busy, try again\n"
		: : "d" (pcpu->address), "d" (source_cpu),
		    "K" (SIGP_RESTART), "K" (SIGP_STOP)
		: "0", "1", "cc");
	for (;;) ;
}
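
/*
 * Note the ordering above: the target is stopped first, so its restart
 * parameters can be updated safely (mem_assign_absolute() writes via
 * the absolute address, i.e. not remapped by the current cpu's prefix
 * register), then the restart is fired and the current cpu stops
 * itself. The "brc 2,..." instructions retry the sigp while condition
 * code 2 (busy) is set.
 */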

/*
 * Call function on an online CPU.
 */
void smp_call_online_cpu(void (*func)(void *), void *data)
{
	struct pcpu *pcpu;

	/* Use the current cpu if it is online. */
	pcpu = pcpu_find_address(cpu_online_mask, stap());
	if (!pcpu)
		/* Use the first online cpu. */
		pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
	pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
}

/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
	pcpu_delegate(&pcpu_devices[0], func, data,
		      pcpu_devices->panic_stack + PAGE_SIZE);
}

int smp_find_processor_id(u16 address)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (pcpu_devices[cpu].address == address)
			return cpu;
	return -1;
}

int smp_vcpu_scheduled(int cpu)
{
	return pcpu_running(pcpu_devices + cpu);
}

void smp_yield(void)
{
	if (MACHINE_HAS_DIAG44)
		asm volatile("diag 0,0,0x44");
}

void smp_yield_cpu(int cpu)
{
	if (MACHINE_HAS_DIAG9C)
		asm volatile("diag %0,0,0x9c"
			     : : "d" (pcpu_devices[cpu].address));
	else if (MACHINE_HAS_DIAG44)
		asm volatile("diag 0,0,0x44");
}

/*
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
void smp_emergency_stop(cpumask_t *cpumask)
{
	u64 end;
	int cpu;

	end = get_tod_clock() + (1000000UL << 12);
	for_each_cpu(cpu, cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		set_bit(ec_stop_cpu, &pcpu->ec_mask);
		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
				   0, NULL) == SIGP_CC_BUSY &&
		       get_tod_clock() < end)
			cpu_relax();
	}
	while (get_tod_clock() < end) {
		for_each_cpu(cpu, cpumask)
			if (pcpu_stopped(pcpu_devices + cpu))
				cpumask_clear_cpu(cpu, cpumask);
		if (cpumask_empty(cpumask))
			break;
		cpu_relax();
	}
}
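
/*
 * The timeout above is one second: bit 51 of the TOD clock ticks every
 * microsecond, so shifting 1000000 microseconds left by 12 converts the
 * value to TOD clock units as returned by get_tod_clock().
 */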

/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
	cpumask_t cpumask;
	int cpu;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
	trace_hardirqs_off();

	debug_set_critical();
	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

	if (oops_in_progress)
		smp_emergency_stop(&cpumask);

	/* stop all processors */
	for_each_cpu(cpu, &cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
		while (!pcpu_stopped(pcpu))
			cpu_relax();
	}
}

/*
 * Stop the current cpu.
 */
void smp_stop_cpu(void)
{
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
	for (;;) ;
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void smp_handle_ext_call(void)
{
	unsigned long bits;

	/* handle bit signal external calls */
	bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
	if (test_bit(ec_stop_cpu, &bits))
		smp_stop_cpu();
	if (test_bit(ec_schedule, &bits))
		scheduler_ipi();
	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
}

static void do_ext_call_interrupt(struct ext_code ext_code,
				  unsigned int param32, unsigned long param64)
{
	inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
	smp_handle_ext_call();
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

void arch_send_call_function_single_ipi(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
static void smp_ptlb_callback(void *info)
{
	__tlb_flush_local();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orval;
	unsigned long andval;
	int cr;
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];

	__ctl_store(cregs, 0, 15);
	cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);
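
/*
 * Usage sketch (illustrative, not from this file): a caller that wants
 * bit 'bit_nr' (counted from the least significant bit, see the
 * "1UL << bit" above) set in control register 0 on all online cpus
 * would call
 *
 *	smp_ctl_set_bit(0, bit_nr);
 *
 * on_each_cpu() runs the callback on every online cpu with interrupts
 * disabled, so the read-modify-write of the control register is atomic
 * with respect to other users of this interface.
 */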

#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP)

struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
EXPORT_SYMBOL_GPL(zfcpdump_save_areas);

static void __init smp_get_save_area(int cpu, u16 address)
{
	void *lc = pcpu_devices[0].lowcore;
	struct save_area *save_area;

	if (is_kdump_kernel())
		return;
	if (!OLDMEM_BASE && (address == boot_cpu_address ||
			     ipl_info.type != IPL_TYPE_FCP_DUMP))
		return;
	if (cpu >= NR_CPUS) {
		pr_warning("CPU %i exceeds the maximum %i and is excluded "
			   "from the dump\n", cpu, NR_CPUS - 1);
		return;
	}
	save_area = kmalloc(sizeof(struct save_area), GFP_KERNEL);
	if (!save_area)
		panic("could not allocate memory for save area\n");
	zfcpdump_save_areas[cpu] = save_area;
#ifdef CONFIG_CRASH_DUMP
	if (address == boot_cpu_address) {
		/* Copy the registers of the boot cpu. */
		copy_oldmem_page(1, (void *) save_area, sizeof(*save_area),
				 SAVE_AREA_BASE - PAGE_SIZE, 0);
		return;
	}
#endif
	/* Get the registers of a non-boot cpu. */
	__pcpu_sigp_relax(address, SIGP_STOP_AND_STORE_STATUS, 0, NULL);
	memcpy_real(save_area, lc + SAVE_AREA_BASE, sizeof(*save_area));
}

int smp_store_status(int cpu)
{
	struct pcpu *pcpu;

	pcpu = pcpu_devices + cpu;
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STOP_AND_STORE_STATUS,
			      0, NULL) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	return 0;
}

#else /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */

static inline void smp_get_save_area(int cpu, u16 address) { }

#endif /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */

void smp_cpu_set_polarization(int cpu, int val)
{
	pcpu_devices[cpu].polarization = val;
}

int smp_cpu_get_polarization(int cpu)
{
	return pcpu_devices[cpu].polarization;
}

static struct sclp_cpu_info *smp_get_cpu_info(void)
{
	static int use_sigp_detection;
	struct sclp_cpu_info *info;
	int address;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info && (use_sigp_detection || sclp_get_cpu_info(info))) {
		use_sigp_detection = 1;
		for (address = 0; address <= MAX_CPU_ADDRESS; address++) {
			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0, NULL) ==
			    SIGP_CC_NOT_OPERATIONAL)
				continue;
			info->cpu[info->configured].address = address;
			info->configured++;
		}
		info->combined = info->configured;
	}
	return info;
}
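
/*
 * Preferably the cpu list comes from the SCLP firmware interface, which
 * also reports standby cpus. If sclp_get_cpu_info() fails, the code
 * falls back to brute-force detection: a SIGP_SENSE to every possible
 * cpu address, where any answer other than "not operational" counts as
 * an existing (and then by definition configured) cpu. The fallback is
 * remembered in use_sigp_detection so later rescans stay consistent.
 */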

static int __cpuinit smp_add_present_cpu(int cpu);

static int __cpuinit __smp_rescan_cpus(struct sclp_cpu_info *info,
				       int sysfs_add)
{
	struct pcpu *pcpu;
	cpumask_t avail;
	int cpu, nr, i;

	nr = 0;
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	cpu = cpumask_first(&avail);
	for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
		if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
			continue;
		if (pcpu_find_address(cpu_present_mask, info->cpu[i].address))
			continue;
		pcpu = pcpu_devices + cpu;
		pcpu->address = info->cpu[i].address;
		pcpu->state = (i >= info->configured) ?
			CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		set_cpu_present(cpu, true);
		if (sysfs_add && smp_add_present_cpu(cpu) != 0)
			set_cpu_present(cpu, false);
		else
			nr++;
		cpu = cpumask_next(cpu, &avail);
	}
	return nr;
}

static void __init smp_detect_cpus(void)
{
	unsigned int cpu, c_cpus, s_cpus;
	struct sclp_cpu_info *info;

	info = smp_get_cpu_info();
	if (!info)
		panic("smp_detect_cpus failed to allocate memory\n");
	if (info->has_cpu_type) {
		for (cpu = 0; cpu < info->combined; cpu++) {
			if (info->cpu[cpu].address != boot_cpu_address)
				continue;
			/* The boot cpu dictates the cpu type. */
			boot_cpu_type = info->cpu[cpu].type;
			break;
		}
	}
	c_cpus = s_cpus = 0;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type)
			continue;
		if (cpu < info->configured) {
			smp_get_save_area(c_cpus, info->cpu[cpu].address);
			c_cpus++;
		} else
			s_cpus++;
	}
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
	get_online_cpus();
	__smp_rescan_cpus(info, 0);
	put_online_cpus();
	kfree(info);
}

/*
 * Activate a secondary processor.
 */
static void __cpuinit smp_start_secondary(void *cpuvoid)
{
	S390_lowcore.last_update_clock = get_tod_clock();
	S390_lowcore.restart_stack = (unsigned long) restart_stack;
	S390_lowcore.restart_fn = (unsigned long) do_restart;
	S390_lowcore.restart_data = 0;
	S390_lowcore.restart_source = -1UL;
	restore_access_regs(S390_lowcore.access_regs_save_area);
	__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
	__load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
	cpu_init();
	preempt_disable();
	init_cpu_timer();
	init_cpu_vtimer();
	pfault_init();
	notify_cpu_starting(smp_processor_id());
	set_cpu_online(smp_processor_id(), true);
	inc_irq_stat(CPU_RST);
	local_irq_enable();
	cpu_startup_entry(CPUHP_ONLINE);
}
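
/*
 * smp_start_secondary() runs on the new cpu as the restart_fn set up by
 * __cpu_up() below: it reinitializes the restart parameters for regular
 * use, loads the control and access registers prepared in
 * pcpu_prepare_secondary(), enables DAT, and finally marks itself
 * online before entering the idle loop via cpu_startup_entry().
 */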

/* Upping and downing of CPUs */
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	struct pcpu *pcpu;
	int rc;

	pcpu = pcpu_devices + cpu;
	if (pcpu->state != CPU_STATE_CONFIGURED)
		return -EIO;
	if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
	    SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;

	rc = pcpu_alloc_lowcore(pcpu, cpu);
	if (rc)
		return rc;
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(pcpu, tidle);
	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}
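
/*
 * Bring-up sequence in short: reset the target with an initial cpu
 * reset, allocate and install its lowcore, copy over the control and
 * access register contents, attach the idle task and fire a restart
 * pointing at smp_start_secondary(). The busy loop at the end is the
 * handshake; the new cpu signals success by setting itself online.
 */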

static int __init setup_possible_cpus(char *s)
{
	int max, cpu;

	if (kstrtoint(s, 0, &max) < 0)
		return 0;
	init_cpu_possible(cpumask_of(0));
	for (cpu = 1; cpu < max && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);
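
/*
 * Example: booting with "possible_cpus=4" on the kernel command line
 * limits the possible mask to cpus 0-3, which in turn caps how many
 * standby cpus can later be configured and brought online via rescan.
 */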

#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
	unsigned long cregs[16];

	/* Handle possible pending IPIs */
	smp_handle_ext_call();
	set_cpu_online(smp_processor_id(), false);
	/* Disable pseudo page faults on this cpu. */
	pfault_fini();
	/* Disable interrupt sources via control register. */
	__ctl_store(cregs, 0, 15);
	cregs[0] &= ~0x0000ee70UL;	/* disable all external interrupts */
	cregs[6] &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14] &= ~0x1f000000UL;	/* disable most machine checks */
	__ctl_load(cregs, 0, 15);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	struct pcpu *pcpu;

	/* Wait until target cpu is down */
	pcpu = pcpu_devices + cpu;
	while (!pcpu_stopped(pcpu))
		cpu_relax();
	pcpu_free_lowcore(pcpu);
	atomic_dec(&init_mm.context.attach_count);
}

void __noreturn cpu_die(void)
{
	idle_task_exit();
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
	for (;;) ;
}

#endif /* CONFIG_HOTPLUG_CPU */

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
	/* request the 0x1202 external call external interrupt */
	if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1202");
	smp_detect_cpus();
}

void __init smp_prepare_boot_cpu(void)
{
	struct pcpu *pcpu = pcpu_devices;

	boot_cpu_address = stap();
	pcpu->state = CPU_STATE_CONFIGURED;
	pcpu->address = boot_cpu_address;
	pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
	pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE
		+ STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
	pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE
		+ STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
	set_cpu_present(0, true);
	set_cpu_online(0, true);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void __init smp_setup_processor_id(void)
{
	S390_lowcore.cpu_nr = 0;
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t cpu_configure_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct pcpu *pcpu;
	int cpu, val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus and cpu 0 */
	cpu = dev->id;
	if (cpu_online(cpu) || cpu == 0)
		goto out;
	pcpu = pcpu_devices + cpu;
	rc = 0;
	switch (val) {
	case 0:
		if (pcpu->state != CPU_STATE_CONFIGURED)
			break;
		rc = sclp_cpu_deconfigure(pcpu->address);
		if (rc)
			break;
		pcpu->state = CPU_STATE_STANDBY;
		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		topology_expect_change();
		break;
	case 1:
		if (pcpu->state != CPU_STATE_STANDBY)
			break;
		rc = sclp_cpu_configure(pcpu->address);
		if (rc)
			break;
		pcpu->state = CPU_STATE_CONFIGURED;
		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		topology_expect_change();
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
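
/*
 * From user space this surfaces as
 * /sys/devices/system/cpu/cpuN/configure: "echo 1" moves a standby cpu
 * to the configured state (after which it can be set online), "echo 0"
 * hands an offline, configured cpu back to the firmware as standby.
 */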
#endif /* CONFIG_HOTPLUG_CPU */

static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&dev_attr_configure.attr,
#endif
	&dev_attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static ssize_t show_idle_count(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
	unsigned long long idle_count;
	unsigned int sequence;

	do {
		sequence = ACCESS_ONCE(idle->sequence);
		idle_count = ACCESS_ONCE(idle->idle_count);
		if (ACCESS_ONCE(idle->clock_idle_enter))
			idle_count++;
	} while ((sequence & 1) || (idle->sequence != sequence));
	return sprintf(buf, "%llu\n", idle_count);
}
static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);

static ssize_t show_idle_time(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
	unsigned long long now, idle_time, idle_enter, idle_exit;
	unsigned int sequence;

	do {
		now = get_tod_clock();
		sequence = ACCESS_ONCE(idle->sequence);
		idle_time = ACCESS_ONCE(idle->idle_time);
		idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
		idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
	} while ((sequence & 1) || (idle->sequence != sequence));
	idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
	return sprintf(buf, "%llu\n", idle_time >> 12);
}
static DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
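
/*
 * Both show functions above use idle->sequence as a hand-rolled
 * seqcount: the idle code increments it once before and once after an
 * update, so an odd value or a changed value means the reader raced
 * with an update and must retry. idle_time is kept in TOD clock units;
 * ">> 12" converts it to microseconds for the idle_time_us attribute.
 */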

static struct attribute *cpu_online_attrs[] = {
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};

static int __cpuinit smp_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct cpu *c = &pcpu_devices[cpu].cpu;
	struct device *s = &c->dev;
	int err = 0;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
		break;
	case CPU_DEAD:
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
		break;
	}
	return notifier_from_errno(err);
}

static int __cpuinit smp_add_present_cpu(int cpu)
{
	struct cpu *c = &pcpu_devices[cpu].cpu;
	struct device *s = &c->dev;
	int rc;

	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	if (cpu_online(cpu)) {
		rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
		if (rc)
			goto out_online;
	}
	rc = topology_cpu_init(c);
	if (rc)
		goto out_topology;
	return 0;

out_topology:
	if (cpu_online(cpu))
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
out_online:
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu(c);
#endif
out:
	return rc;
}

#ifdef CONFIG_HOTPLUG_CPU

int __ref smp_rescan_cpus(void)
{
	struct sclp_cpu_info *info;
	int nr;

	info = smp_get_cpu_info();
	if (!info)
		return -ENOMEM;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	nr = __smp_rescan_cpus(info, 1);
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	kfree(info);
	if (nr)
		topology_schedule_update();
	return 0;
}

static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = smp_rescan_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
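
/*
 * The rescan attribute is created on the cpu subsystem root in
 * s390_smp_init() below, so "echo 1 > /sys/devices/system/cpu/rescan"
 * (any write works, the value is ignored) asks the machine for newly
 * added cpus and registers them as present standby/configured cpus.
 */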
#endif /* CONFIG_HOTPLUG_CPU */

static int __init s390_smp_init(void)
{
	int cpu, rc;

	hotcpu_notifier(smp_cpu_notify, 0);
#ifdef CONFIG_HOTPLUG_CPU
	rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
	if (rc)
		return rc;
#endif
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			return rc;
	}
	return 0;
}
subsys_initcall(s390_smp_init);