[S390] dasd: Add ipldev parameter.
arch/s390/kernel/smp.c
/*
 *  arch/s390/kernel/smp.c
 *
 *    Copyright (C) IBM Corp. 1999,2006
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *               Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (got from STAP) are the sigp
 * functions. For all other functions we use the identity mapping.
 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 * used e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the physical
 * one which is causing all the confusion with __cpu_logical_map and
 * cpu_number_map in other architectures.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>
#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/timex.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>

extern volatile int __cpu_logical_map[];

/*
 * An array with a pointer to the lowcore of every CPU.
 */

struct _lowcore *lowcore_ptr[NR_CPUS];

cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t cpu_possible_map = CPU_MASK_NONE;

static struct task_struct *current_set[NR_CPUS];

static void smp_ext_bitcall(int, ec_bit_sig);

/*
 * Structure and data for __smp_call_function_map(). This is designed to
 * minimise static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	cpumask_t started;
	cpumask_t finished;
	int wait;
};

static struct call_data_struct *call_data;

/*
 * 'Call function' interrupt callback
 */
static void do_call_function(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	cpu_set(smp_processor_id(), call_data->started);
	(*func)(info);
	if (wait)
		cpu_set(smp_processor_id(), call_data->finished);
}
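
/*
 * Note on the handshake (added for clarity): __smp_call_function_map()
 * below publishes a call_data_struct, signals each target cpu with
 * ec_call_function and then spins until every cpu in the map has set its
 * bit in ->started (and, if wait is set, in ->finished). Those bits are
 * set here in do_call_function(), which runs in the external call
 * interrupt on the target cpu, so call_data must stay valid until the
 * caller has seen all of the ->started/->finished bits.
 */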

static void __smp_call_function_map(void (*func) (void *info), void *info,
				    int nonatomic, int wait, cpumask_t map)
{
	struct call_data_struct data;
	int cpu, local = 0;

	/*
	 * Can deadlock when interrupts are disabled or if in wrong context.
	 */
	WARN_ON(irqs_disabled() || in_irq());

	/*
	 * Check for local function call. We have to have the same call order
	 * as in on_each_cpu() because of machine_restart_smp().
	 */
	if (cpu_isset(smp_processor_id(), map)) {
		local = 1;
		cpu_clear(smp_processor_id(), map);
	}

	cpus_and(map, map, cpu_online_map);
	if (cpus_empty(map))
		goto out;

	data.func = func;
	data.info = info;
	data.started = CPU_MASK_NONE;
	data.wait = wait;
	if (wait)
		data.finished = CPU_MASK_NONE;

	spin_lock_bh(&call_lock);
	call_data = &data;

	for_each_cpu_mask(cpu, map)
		smp_ext_bitcall(cpu, ec_call_function);

	/* Wait for response */
	while (!cpus_equal(map, data.started))
		cpu_relax();

	if (wait)
		while (!cpus_equal(map, data.finished))
			cpu_relax();

	spin_unlock_bh(&call_lock);

out:
	local_irq_disable();
	if (local)
		func(info);
	local_irq_enable();
}

/*
 * smp_call_function:
 * @func: the function to run; this must be fast and non-blocking
 * @info: an arbitrary pointer to pass to the function
 * @nonatomic: unused
 * @wait: if true, wait (atomically) until function has completed on other CPUs
 *
 * Run a function on all other CPUs.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler. You may call it from a bottom half.
 */
int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
		      int wait)
{
	cpumask_t map;

	preempt_disable();
	map = cpu_online_map;
	cpu_clear(smp_processor_id(), map);
	__smp_call_function_map(func, info, nonatomic, wait, map);
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(smp_call_function);
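
/*
 * Example (illustrative sketch, not part of the original file): run a
 * fast, non-blocking callback on every other online cpu and wait for it
 * to complete everywhere. my_drain_percpu_state() is a made-up name.
 *
 *	static void my_drain_percpu_state(void *unused)
 *	{
 *		...	(fast, non-blocking work on the remote cpu)
 *	}
 *
 *	smp_call_function(my_drain_percpu_state, NULL, 0, 1);
 */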

/*
 * smp_call_function_on:
 * @func: the function to run; this must be fast and non-blocking
 * @info: an arbitrary pointer to pass to the function
 * @nonatomic: unused
 * @wait: if true, wait (atomically) until function has completed on other CPUs
 * @cpu: the CPU where func should run
 *
 * Run a function on one processor.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler. You may call it from a bottom half.
 */
int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic,
			 int wait, int cpu)
{
	cpumask_t map = CPU_MASK_NONE;

	preempt_disable();
	cpu_set(cpu, map);
	__smp_call_function_map(func, info, nonatomic, wait, map);
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(smp_call_function_on);
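
/*
 * Example (illustrative sketch, not part of the original file): the same
 * made-up callback, but run only on cpu 2 and without waiting for it to
 * finish there.
 *
 *	smp_call_function_on(my_drain_percpu_state, NULL, 0, 0, 2);
 */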

static void do_send_stop(void)
{
	int cpu, rc;

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor(cpu, sigp_stop);
		} while (rc == sigp_busy);
	}
}

static void do_store_status(void)
{
	int cpu, rc;

	/* store status of all processors in their lowcores (real 0) */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor_p(
				(__u32)(unsigned long) lowcore_ptr[cpu], cpu,
				sigp_store_status_at_address);
		} while (rc == sigp_busy);
	}
}

static void do_wait_for_stop(void)
{
	int cpu;

	/* Wait for all other cpus to enter stopped state */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		while (!smp_cpu_not_running(cpu))
			cpu_relax();
	}
}

/*
 * this function sends a 'stop' sigp to all other CPUs in the system.
 * it goes straight through.
 */
void smp_send_stop(void)
{
	/* Disable all interrupts/machine checks */
	__load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);

	/* write magic number to zero page (absolute 0) */
	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;

	/* stop other processors. */
	do_send_stop();

	/* wait until other processors are stopped */
	do_wait_for_stop();

	/* store status of other processors. */
	do_store_status();
}

/*
 * Reboot, halt and power_off routines for SMP.
 */

void machine_restart_smp(char * __unused)
{
	smp_send_stop();
	do_reipl();
}

void machine_halt_smp(void)
{
	smp_send_stop();
	if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
		__cpcmd(vmhalt_cmd, NULL, 0, NULL);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
	for (;;);
}

void machine_power_off_smp(void)
{
	smp_send_stop();
	if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
		__cpcmd(vmpoff_cmd, NULL, 0, NULL);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
	for (;;);
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */

static void do_ext_call_interrupt(__u16 code)
{
	unsigned long bits;

	/*
	 * handle bit signal external calls
	 *
	 * For the ec_schedule signal we have to do nothing. All the work
	 * is done automatically when we return from the interrupt.
	 */
	bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_call_function, &bits))
		do_call_function();
}

/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
	while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
		udelay(10);
}
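
/*
 * Note (added for clarity): smp_ext_bitcall() and do_ext_call_interrupt()
 * form a pair. The sender sets a bit in the target cpu's lowcore word
 * ext_call_fast and raises a sigp emergency signal; the target's external
 * interrupt handler atomically fetches and clears that word with xchg()
 * and dispatches on the bits it found. Only ec_call_function needs
 * explicit handling; ec_schedule is handled implicitly on return from the
 * interrupt.
 */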

#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
void smp_ptlb_callback(void *info)
{
	local_flush_tlb();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	smp_ext_bitcall(cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orvals[16];
	unsigned long andvals[16];
};
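
/*
 * Note (added for clarity): for each control register i the callback
 * below computes new_cr[i] = (old_cr[i] & andvals[i]) | orvals[i]. A
 * caller therefore fills andvals with all ones and puts the bits to set
 * into orvals, or leaves orvals zero and clears the bits to drop from
 * andvals, as smp_ctl_set_bit() and smp_ctl_clear_bit() do.
 */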

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];
	int i;

	__ctl_store(cregs, 0, 15);
	for (i = 0; i <= 15; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.orvals[cr] = 1 << bit;
	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.andvals[cr] = ~(1L << bit);
	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
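
/*
 * Example (illustrative sketch, not part of the original file): the
 * all-cpu counterpart of the ctl_set_bit(14, 29) call used later in this
 * file to enable the extended save area would be
 *
 *	smp_ctl_set_bit(14, 29);
 *
 * which applies the same control register update on every online cpu.
 */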

/*
 * Let's check how many CPUs we have.
 */

static unsigned int __init smp_count_cpus(void)
{
	unsigned int cpu, num_cpus;
	__u16 boot_cpu_addr;

	/*
	 * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
	 */

	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
	current_thread_info()->cpu = 0;
	num_cpus = 1;
	for (cpu = 0; cpu <= 65535; cpu++) {
		if ((__u16) cpu == boot_cpu_addr)
			continue;
		__cpu_logical_map[1] = (__u16) cpu;
		if (signal_processor(1, sigp_sense) == sigp_not_operational)
			continue;
		num_cpus++;
	}

	printk("Detected %d CPU's\n", (int) num_cpus);
	printk("Boot cpu address %2X\n", boot_cpu_addr);

	return num_cpus;
}

/*
 * Activate a secondary processor.
 */
int __devinit start_secondary(void *cpuvoid)
{
	/* Setup the cpu */
	cpu_init();
	preempt_disable();
	/* Enable TOD clock interrupts on the secondary cpu. */
	init_cpu_timer();
#ifdef CONFIG_VIRT_TIMER
	/* Enable cpu timer interrupts on the secondary cpu. */
	init_cpu_vtimer();
#endif
	/* Enable pfault pseudo page faults on this cpu. */
	pfault_init();

	/* Mark this cpu as online */
	cpu_set(smp_processor_id(), cpu_online_map);
	/* Switch on interrupts */
	local_irq_enable();
	/* Print info about this processor */
	print_cpu_info(&S390_lowcore.cpu_data);
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}

static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/*
	 * don't care about the psw and regs settings since we'll never
	 * reschedule the forked task.
	 */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
	current_set[cpu] = p;
}

/* Reserving and releasing of CPUs */

static DEFINE_SPINLOCK(smp_reserve_lock);
static int smp_cpu_reserved[NR_CPUS];

int smp_get_cpu(cpumask_t cpu_mask)
{
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&smp_reserve_lock, flags);
	/* Try to find an already reserved cpu. */
	for_each_cpu_mask(cpu, cpu_mask) {
		if (smp_cpu_reserved[cpu] != 0) {
			smp_cpu_reserved[cpu]++;
			/* Found one. */
			goto out;
		}
	}
	/* Reserve a new cpu from cpu_mask. */
	for_each_cpu_mask(cpu, cpu_mask) {
		if (cpu_online(cpu)) {
			smp_cpu_reserved[cpu]++;
			goto out;
		}
	}
	cpu = -ENODEV;
out:
	spin_unlock_irqrestore(&smp_reserve_lock, flags);
	return cpu;
}

void smp_put_cpu(int cpu)
{
	unsigned long flags;

	spin_lock_irqsave(&smp_reserve_lock, flags);
	smp_cpu_reserved[cpu]--;
	spin_unlock_irqrestore(&smp_reserve_lock, flags);
}
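
/*
 * Example (illustrative sketch, not part of the original file): reserve
 * any online cpu, use it (for instance as the target of
 * smp_call_function_on()), then drop the reservation again.
 *
 *	int cpu = smp_get_cpu(CPU_MASK_ALL);
 *
 *	if (cpu >= 0) {
 *		...
 *		smp_put_cpu(cpu);
 *	}
 */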

static int cpu_stopped(int cpu)
{
	__u32 status;

	/* Check for stopped state */
	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
	    sigp_status_stored) {
		if (status & 0x40)
			return 1;
	}
	return 0;
}

/* Upping and downing of CPUs */

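/*
 * Note (added for clarity): __cpu_up() below walks the cpu address space
 * until it finds a stopped processor, points that cpu's prefix register
 * at the prepared lowcore with sigp_set_prefix, seeds the lowcore with a
 * kernel stack, control and access registers and the idle task, and then
 * kicks the cpu with sigp_restart, spinning until start_secondary() has
 * marked it online.
 */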
int __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
	struct _lowcore *cpu_lowcore;
	struct stack_frame *sf;
	sigp_ccode ccode;
	int curr_cpu;

	for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
		__cpu_logical_map[cpu] = (__u16) curr_cpu;
		if (cpu_stopped(cpu))
			break;
	}

	if (!cpu_stopped(cpu))
		return -ENODEV;

	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
				   cpu, sigp_set_prefix);
	if (ccode) {
		printk("sigp_set_prefix failed for cpu %d "
		       "with condition code %d\n",
		       (int) cpu, (int) ccode);
		return -EIO;
	}

	idle = current_set[cpu];
	cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->kernel_stack = (unsigned long)
		task_stack_page(idle) + (THREAD_SIZE);
	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
				     - sizeof(struct pt_regs)
				     - sizeof(struct stack_frame));
	memset(sf, 0, sizeof(struct stack_frame));
	sf->gprs[9] = (unsigned long) sf;
	cpu_lowcore->save_area[15] = (unsigned long) sf;
	__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
	asm volatile(
		"	stam	0,15,0(%0)"
		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
	cpu_lowcore->current_task = (unsigned long) idle;
	cpu_lowcore->cpu_data.cpu_nr = cpu;
	eieio();

	while (signal_processor(cpu, sigp_restart) == sigp_busy)
		udelay(10);

	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}

static unsigned int __initdata additional_cpus;
static unsigned int __initdata possible_cpus;

void __init smp_setup_cpu_possible_map(void)
{
	unsigned int phy_cpus, pos_cpus, cpu;

	phy_cpus = smp_count_cpus();
	pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS);

	if (possible_cpus)
		pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS);

	for (cpu = 0; cpu < pos_cpus; cpu++)
		cpu_set(cpu, cpu_possible_map);

	phy_cpus = min(phy_cpus, pos_cpus);

	for (cpu = 0; cpu < phy_cpus; cpu++)
		cpu_set(cpu, cpu_present_map);
}

#ifdef CONFIG_HOTPLUG_CPU

static int __init setup_additional_cpus(char *s)
{
	additional_cpus = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("additional_cpus", setup_additional_cpus);

static int __init setup_possible_cpus(char *s)
{
	possible_cpus = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);
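
/*
 * Example (illustrative, not part of the original file): booting with
 * "additional_cpus=2" makes two possible cpus available beyond the ones
 * detected at boot, while "possible_cpus=4" overrides that and fixes the
 * number of possible cpus at four; both values are capped at NR_CPUS by
 * smp_setup_cpu_possible_map() above.
 */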

int __cpu_disable(void)
{
	unsigned long flags;
	struct ec_creg_mask_parms cr_parms;
	int cpu = smp_processor_id();

	spin_lock_irqsave(&smp_reserve_lock, flags);
	if (smp_cpu_reserved[cpu] != 0) {
		spin_unlock_irqrestore(&smp_reserve_lock, flags);
		return -EBUSY;
	}
	cpu_clear(cpu, cpu_online_map);

	/* Disable pfault pseudo page faults on this cpu. */
	pfault_fini();

	memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
	memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));

	/* disable all external interrupts */
	cr_parms.orvals[0] = 0;
	cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 |
				1 << 11 | 1 << 10 | 1 << 6 | 1 << 4);
	/* disable all I/O interrupts */
	cr_parms.orvals[6] = 0;
	cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
				1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
	/* disable most machine checks */
	cr_parms.orvals[14] = 0;
	cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
				 1 << 25 | 1 << 24);

	smp_ctl_bit_callback(&cr_parms);

	spin_unlock_irqrestore(&smp_reserve_lock, flags);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	/* Wait until target cpu is down */
	while (!smp_cpu_not_running(cpu))
		cpu_relax();
	printk("Processor %d spun down\n", cpu);
}

void cpu_die(void)
{
	idle_task_exit();
	signal_processor(smp_processor_id(), sigp_stop);
	BUG();
	for (;;);
}

#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Cycle through the processors and setup structures.
 */

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned long stack;
	unsigned int cpu;
	int i;

	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
	memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
	/*
	 * Initialize prefix pages and stacks for all possible cpus
	 */
	print_cpu_info(&S390_lowcore.cpu_data);

	for_each_possible_cpu(i) {
		lowcore_ptr[i] = (struct _lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA,
					 sizeof(void *) == 8 ? 1 : 0);
		stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		if (lowcore_ptr[i] == NULL || stack == 0ULL)
			panic("smp_boot_cpus failed to allocate memory\n");

		*(lowcore_ptr[i]) = S390_lowcore;
		lowcore_ptr[i]->async_stack = stack + (ASYNC_SIZE);
		stack = __get_free_pages(GFP_KERNEL, 0);
		if (stack == 0ULL)
			panic("smp_boot_cpus failed to allocate memory\n");
		lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE);
#ifndef CONFIG_64BIT
		if (MACHINE_HAS_IEEE) {
			lowcore_ptr[i]->extended_save_area_addr =
				(__u32) __get_free_pages(GFP_KERNEL, 0);
			if (lowcore_ptr[i]->extended_save_area_addr == 0)
				panic("smp_boot_cpus failed to "
				      "allocate memory\n");
		}
#endif
	}
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		ctl_set_bit(14, 29); /* enable extended save area */
#endif
	set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);

	for_each_possible_cpu(cpu)
		if (cpu != smp_processor_id())
			smp_create_idle(cpu);
}

void __devinit smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);

	cpu_set(0, cpu_online_map);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	current_set[0] = current;
}

void smp_cpus_done(unsigned int max_cpus)
{
	cpu_present_map = cpu_possible_map;
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static int __init topology_init(void)
{
	int cpu;
	int ret;

	for_each_possible_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);

		c->hotpluggable = 1;
		ret = register_cpu(c, cpu);
		if (ret)
			printk(KERN_WARNING "topology_init: register_cpu %d "
			       "failed (%d)\n", cpu, ret);
	}
	return 0;
}

subsys_initcall(topology_init);

EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);
EXPORT_SYMBOL(lowcore_ptr);
EXPORT_SYMBOL(smp_ctl_set_bit);
EXPORT_SYMBOL(smp_ctl_clear_bit);
EXPORT_SYMBOL(smp_get_cpu);
EXPORT_SYMBOL(smp_put_cpu);