[SPARC64]: Do not try to write to %tick or %stick on SUN4V.
arch/sparc64/kernel/smp.c
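On SUN4V the %tick and %stick registers are under hypervisor control, so the kernel must not attempt to write them directly. Below is a minimal illustrative sketch of the kind of guard this implies, not the actual patch (the real change lives in the timer code rather than in this file); the function and helper names are hypothetical, and only the tlb_type == hypervisor test mirrors checks already used in this file:

/* Hedged sketch: skip %tick/%stick writes when running under the sun4v
 * hypervisor.  hypothetical_tick_init() and write_tick_register() are
 * made-up names used purely for illustration.
 */
static void hypothetical_tick_init(unsigned long offset)
{
	if (tlb_type == hypervisor)
		return;			/* %tick/%stick are not ours to write on SUN4V */

	write_tick_register(offset);	/* pre-sun4v behaviour */
}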
1/* smp.c: Sparc64 SMP support.
2 *
3 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
4 */
5
6#include <linux/module.h>
7#include <linux/kernel.h>
8#include <linux/sched.h>
9#include <linux/mm.h>
10#include <linux/pagemap.h>
11#include <linux/threads.h>
12#include <linux/smp.h>
13#include <linux/smp_lock.h>
14#include <linux/interrupt.h>
15#include <linux/kernel_stat.h>
16#include <linux/delay.h>
17#include <linux/init.h>
18#include <linux/spinlock.h>
19#include <linux/fs.h>
20#include <linux/seq_file.h>
21#include <linux/cache.h>
22#include <linux/jiffies.h>
23#include <linux/profile.h>
24#include <linux/bootmem.h>
25
26#include <asm/head.h>
27#include <asm/ptrace.h>
28#include <asm/atomic.h>
29#include <asm/tlbflush.h>
30#include <asm/mmu_context.h>
31#include <asm/cpudata.h>
32
33#include <asm/irq.h>
34#include <asm/page.h>
35#include <asm/pgtable.h>
36#include <asm/oplib.h>
37#include <asm/uaccess.h>
38#include <asm/timer.h>
39#include <asm/starfire.h>
40#include <asm/tlb.h>
41#include <asm/sections.h>
42
43extern void calibrate_delay(void);
44
45/* Please don't make this stuff initdata!!! --DaveM */
46static unsigned char boot_cpu_id;
47
48cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
49cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
50static cpumask_t smp_commenced_mask;
51static cpumask_t cpu_callout_map;
52
53void smp_info(struct seq_file *m)
54{
55 int i;
56
57 seq_printf(m, "State:\n");
58 for (i = 0; i < NR_CPUS; i++) {
59 if (cpu_online(i))
60 seq_printf(m,
61 "CPU%d:\t\tonline\n", i);
62 }
63}
64
65void smp_bogo(struct seq_file *m)
66{
67 int i;
68
69 for (i = 0; i < NR_CPUS; i++)
70 if (cpu_online(i))
71 seq_printf(m,
72 "Cpu%dBogo\t: %lu.%02lu\n"
73 "Cpu%dClkTck\t: %016lx\n",
74 i, cpu_data(i).udelay_val / (500000/HZ),
75 (cpu_data(i).udelay_val / (5000/HZ)) % 100,
76 i, cpu_data(i).clock_tick);
77}
78
79void __init smp_store_cpu_info(int id)
80{
81 int cpu_node;
82
83 /* multiplier and counter set by
84 smp_setup_percpu_timer() */
85 cpu_data(id).udelay_val = loops_per_jiffy;
86
87 cpu_find_by_mid(id, &cpu_node);
88 cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
89 "clock-frequency", 0);
90
91 cpu_data(id).idle_volume = 1;
92
93 cpu_data(id).dcache_size = prom_getintdefault(cpu_node, "dcache-size",
94 16 * 1024);
95 cpu_data(id).dcache_line_size =
96 prom_getintdefault(cpu_node, "dcache-line-size", 32);
97 cpu_data(id).icache_size = prom_getintdefault(cpu_node, "icache-size",
98 16 * 1024);
99 cpu_data(id).icache_line_size =
100 prom_getintdefault(cpu_node, "icache-line-size", 32);
101 cpu_data(id).ecache_size = prom_getintdefault(cpu_node, "ecache-size",
102 4 * 1024 * 1024);
103 cpu_data(id).ecache_line_size =
104 prom_getintdefault(cpu_node, "ecache-line-size", 64);
105 printk("CPU[%d]: Caches "
106 "D[sz(%d):line_sz(%d)] "
107 "I[sz(%d):line_sz(%d)] "
108 "E[sz(%d):line_sz(%d)]\n",
109 id,
110 cpu_data(id).dcache_size, cpu_data(id).dcache_line_size,
111 cpu_data(id).icache_size, cpu_data(id).icache_line_size,
112 cpu_data(id).ecache_size, cpu_data(id).ecache_line_size);
113}
114
115static void smp_setup_percpu_timer(void);
116
117static volatile unsigned long callin_flag = 0;
118
119void __init smp_callin(void)
120{
121 int cpuid = hard_smp_processor_id();
122
123 __local_per_cpu_offset = __per_cpu_offset(cpuid);
124
125 if (tlb_type == hypervisor) {
126 sun4v_register_fault_status();
127 sun4v_ktsb_register();
128 }
129
130 __flush_tlb_all();
131
132 smp_setup_percpu_timer();
133
134 if (cheetah_pcache_forced_on)
135 cheetah_enable_pcache();
136
137 local_irq_enable();
138
139 calibrate_delay();
140 smp_store_cpu_info(cpuid);
141 callin_flag = 1;
142 __asm__ __volatile__("membar #Sync\n\t"
143 "flush %%g6" : : : "memory");
144
145 /* Clear this or we will die instantly when we
146 * schedule back to this idler...
147 */
148 current_thread_info()->new_child = 0;
149
150 /* Attach to the address space of init_task. */
151 atomic_inc(&init_mm.mm_count);
152 current->active_mm = &init_mm;
153
154 while (!cpu_isset(cpuid, smp_commenced_mask))
155 rmb();
156
157 cpu_set(cpuid, cpu_online_map);
158
159 /* idle thread is expected to have preempt disabled */
160 preempt_disable();
161}
162
163void cpu_panic(void)
164{
165 printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
166 panic("SMP bolixed\n");
167}
168
169 static unsigned long current_tick_offset __read_mostly;
170
171/* This tick register synchronization scheme is taken entirely from
172 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
173 *
174 * The only change I've made is to rework it so that the master
 175 * initiates the synchronization instead of the slave. -DaveM
176 */
177
178#define MASTER 0
179#define SLAVE (SMP_CACHE_BYTES/sizeof(unsigned long))
180
181#define NUM_ROUNDS 64 /* magic value */
182#define NUM_ITERS 5 /* likewise */
183
184static DEFINE_SPINLOCK(itc_sync_lock);
185static unsigned long go[SLAVE + 1];
186
187#define DEBUG_TICK_SYNC 0
188
189static inline long get_delta (long *rt, long *master)
190{
191 unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
192 unsigned long tcenter, t0, t1, tm;
193 unsigned long i;
194
195 for (i = 0; i < NUM_ITERS; i++) {
196 t0 = tick_ops->get_tick();
197 go[MASTER] = 1;
198 membar_storeload();
199 while (!(tm = go[SLAVE]))
200 rmb();
201 go[SLAVE] = 0;
202 wmb();
203 t1 = tick_ops->get_tick();
204
205 if (t1 - t0 < best_t1 - best_t0)
206 best_t0 = t0, best_t1 = t1, best_tm = tm;
207 }
208
209 *rt = best_t1 - best_t0;
210 *master = best_tm - best_t0;
211
212 /* average best_t0 and best_t1 without overflow: */
213 tcenter = (best_t0/2 + best_t1/2);
214 if (best_t0 % 2 + best_t1 % 2 == 2)
215 tcenter++;
216 return tcenter - best_tm;
217}
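/* Note on the arithmetic above: the master's timestamp tm is captured
 * somewhere between the slave's two tick reads t0 and t1, so the midpoint
 * tcenter = (t0 + t1)/2 is the slave's best estimate of its own tick value
 * at the moment tm was taken.  The returned value tcenter - tm is therefore
 * the signed slave-minus-master offset, *rt = t1 - t0 bounds the measurement
 * error by the round-trip time, and the NUM_ITERS loop keeps the iteration
 * with the smallest round trip.
 */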
218
219void smp_synchronize_tick_client(void)
220{
221 long i, delta, adj, adjust_latency = 0, done = 0;
222 unsigned long flags, rt, master_time_stamp, bound;
223#if DEBUG_TICK_SYNC
224 struct {
225 long rt; /* roundtrip time */
226 long master; /* master's timestamp */
227 long diff; /* difference between midpoint and master's timestamp */
228 long lat; /* estimate of itc adjustment latency */
229 } t[NUM_ROUNDS];
230#endif
231
232 go[MASTER] = 1;
233
234 while (go[MASTER])
235 rmb();
236
237 local_irq_save(flags);
238 {
239 for (i = 0; i < NUM_ROUNDS; i++) {
240 delta = get_delta(&rt, &master_time_stamp);
241 if (delta == 0) {
242 done = 1; /* let's lock on to this... */
243 bound = rt;
244 }
245
246 if (!done) {
247 if (i > 0) {
248 adjust_latency += -delta;
249 adj = -delta + adjust_latency/4;
250 } else
251 adj = -delta;
252
253 tick_ops->add_tick(adj, current_tick_offset);
254 }
255#if DEBUG_TICK_SYNC
256 t[i].rt = rt;
257 t[i].master = master_time_stamp;
258 t[i].diff = delta;
259 t[i].lat = adjust_latency/4;
260#endif
261 }
262 }
263 local_irq_restore(flags);
264
265#if DEBUG_TICK_SYNC
266 for (i = 0; i < NUM_ROUNDS; i++)
267 printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
268 t[i].rt, t[i].master, t[i].diff, t[i].lat);
269#endif
270
 271 printk(KERN_INFO "CPU %d: synchronized TICK with master CPU (last diff %ld cycles, "
 272 "maxerr %lu cycles)\n", smp_processor_id(), delta, rt);
273}
274
275static void smp_start_sync_tick_client(int cpu);
276
277static void smp_synchronize_one_tick(int cpu)
278{
279 unsigned long flags, i;
280
281 go[MASTER] = 0;
282
283 smp_start_sync_tick_client(cpu);
284
285 /* wait for client to be ready */
286 while (!go[MASTER])
287 rmb();
288
289 /* now let the client proceed into his loop */
290 go[MASTER] = 0;
291 membar_storeload();
292
293 spin_lock_irqsave(&itc_sync_lock, flags);
294 {
295 for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
296 while (!go[MASTER])
297 rmb();
298 go[MASTER] = 0;
299 wmb();
300 go[SLAVE] = tick_ops->get_tick();
301 membar_storeload();
302 }
303 }
304 spin_unlock_irqrestore(&itc_sync_lock, flags);
305}
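/* Handshake summary: the slave raises go[MASTER] to announce it is ready,
 * the master clears it and then, for NUM_ROUNDS*NUM_ITERS exchanges, waits
 * for the slave to raise go[MASTER] again, publishes its current tick in
 * go[SLAVE], and lets the slave (in get_delta/smp_synchronize_tick_client)
 * compute and apply the offset.  The itc_sync_lock serializes masters so
 * only one CPU is synchronized at a time.
 */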
306
307extern unsigned long sparc64_cpu_startup;
308
309/* The OBP cpu startup callback truncates the 3rd arg cookie to
310 * 32-bits (I think) so to be safe we have it read the pointer
311 * contained here so we work on >4GB machines. -DaveM
312 */
313static struct thread_info *cpu_new_thread = NULL;
314
315static int __devinit smp_boot_one_cpu(unsigned int cpu)
316{
317 unsigned long entry =
318 (unsigned long)(&sparc64_cpu_startup);
319 unsigned long cookie =
320 (unsigned long)(&cpu_new_thread);
321 struct task_struct *p;
322 int timeout, ret, cpu_node;
323
324 p = fork_idle(cpu);
325 callin_flag = 0;
326 cpu_new_thread = task_thread_info(p);
327 cpu_set(cpu, cpu_callout_map);
328
329 cpu_find_by_mid(cpu, &cpu_node);
330 prom_startcpu(cpu_node, entry, cookie);
331
332 for (timeout = 0; timeout < 5000000; timeout++) {
333 if (callin_flag)
334 break;
335 udelay(100);
336 }
337 if (callin_flag) {
338 ret = 0;
339 } else {
340 printk("Processor %d is stuck.\n", cpu);
341 cpu_clear(cpu, cpu_callout_map);
342 ret = -ENODEV;
343 }
344 cpu_new_thread = NULL;
345
346 return ret;
347}
348
349static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
350{
351 u64 result, target;
352 int stuck, tmp;
353
354 if (this_is_starfire) {
355 /* map to real upaid */
356 cpu = (((cpu & 0x3c) << 1) |
357 ((cpu & 0x40) >> 4) |
358 (cpu & 0x3));
359 }
360
361 target = (cpu << 14) | 0x70;
362again:
363 /* Ok, this is the real Spitfire Errata #54.
364 * One must read back from a UDB internal register
365 * after writes to the UDB interrupt dispatch, but
366 * before the membar Sync for that write.
367 * So we use the high UDB control register (ASI 0x7f,
368 * ADDR 0x20) for the dummy read. -DaveM
369 */
370 tmp = 0x40;
371 __asm__ __volatile__(
372 "wrpr %1, %2, %%pstate\n\t"
373 "stxa %4, [%0] %3\n\t"
374 "stxa %5, [%0+%8] %3\n\t"
375 "add %0, %8, %0\n\t"
376 "stxa %6, [%0+%8] %3\n\t"
377 "membar #Sync\n\t"
378 "stxa %%g0, [%7] %3\n\t"
379 "membar #Sync\n\t"
380 "mov 0x20, %%g1\n\t"
381 "ldxa [%%g1] 0x7f, %%g0\n\t"
382 "membar #Sync"
383 : "=r" (tmp)
384 : "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
385 "r" (data0), "r" (data1), "r" (data2), "r" (target),
386 "r" (0x10), "0" (tmp)
387 : "g1");
388
389 /* NOTE: PSTATE_IE is still clear. */
390 stuck = 100000;
391 do {
392 __asm__ __volatile__("ldxa [%%g0] %1, %0"
393 : "=r" (result)
394 : "i" (ASI_INTR_DISPATCH_STAT));
395 if (result == 0) {
396 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
397 : : "r" (pstate));
398 return;
399 }
400 stuck -= 1;
401 if (stuck == 0)
402 break;
403 } while (result & 0x1);
404 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
405 : : "r" (pstate));
406 if (stuck == 0) {
407 printk("CPU[%d]: mondo stuckage result[%016lx]\n",
408 smp_processor_id(), result);
409 } else {
410 udelay(2);
411 goto again;
412 }
413}
414
415static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
416{
417 u64 pstate;
418 int i;
419
420 __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
421 for_each_cpu_mask(i, mask)
422 spitfire_xcall_helper(data0, data1, data2, pstate, i);
423}
424
 425/* Cheetah now allows us to send the whole 64 bytes of data in the interrupt
426 * packet, but we have no use for that. However we do take advantage of
427 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
428 */
429static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
430{
431 u64 pstate, ver;
432 int nack_busy_id, is_jbus;
433
434 if (cpus_empty(mask))
435 return;
436
437 /* Unfortunately, someone at Sun had the brilliant idea to make the
438 * busy/nack fields hard-coded by ITID number for this Ultra-III
439 * derivative processor.
440 */
441 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
442 is_jbus = ((ver >> 32) == __JALAPENO_ID ||
443 (ver >> 32) == __SERRANO_ID);
444
445 __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
446
447retry:
448 __asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
449 : : "r" (pstate), "i" (PSTATE_IE));
450
451 /* Setup the dispatch data registers. */
452 __asm__ __volatile__("stxa %0, [%3] %6\n\t"
453 "stxa %1, [%4] %6\n\t"
454 "stxa %2, [%5] %6\n\t"
455 "membar #Sync\n\t"
456 : /* no outputs */
457 : "r" (data0), "r" (data1), "r" (data2),
458 "r" (0x40), "r" (0x50), "r" (0x60),
459 "i" (ASI_INTR_W));
460
461 nack_busy_id = 0;
462 {
463 int i;
464
465 for_each_cpu_mask(i, mask) {
466 u64 target = (i << 14) | 0x70;
467
468 if (!is_jbus)
469 target |= (nack_busy_id << 24);
470 __asm__ __volatile__(
471 "stxa %%g0, [%0] %1\n\t"
472 "membar #Sync\n\t"
473 : /* no outputs */
474 : "r" (target), "i" (ASI_INTR_W));
475 nack_busy_id++;
476 }
477 }
478
479 /* Now, poll for completion. */
480 {
481 u64 dispatch_stat;
482 long stuck;
483
484 stuck = 100000 * nack_busy_id;
485 do {
486 __asm__ __volatile__("ldxa [%%g0] %1, %0"
487 : "=r" (dispatch_stat)
488 : "i" (ASI_INTR_DISPATCH_STAT));
489 if (dispatch_stat == 0UL) {
490 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
491 : : "r" (pstate));
492 return;
493 }
494 if (!--stuck)
495 break;
496 } while (dispatch_stat & 0x5555555555555555UL);
497
498 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
499 : : "r" (pstate));
500
501 if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
502 /* Busy bits will not clear, continue instead
503 * of freezing up on this cpu.
504 */
505 printk("CPU[%d]: mondo stuckage result[%016lx]\n",
506 smp_processor_id(), dispatch_stat);
507 } else {
508 int i, this_busy_nack = 0;
509
510 /* Delay some random time with interrupts enabled
511 * to prevent deadlock.
512 */
513 udelay(2 * nack_busy_id);
514
515 /* Clear out the mask bits for cpus which did not
516 * NACK us.
517 */
518 for_each_cpu_mask(i, mask) {
519 u64 check_mask;
520
521 if (is_jbus)
522 check_mask = (0x2UL << (2*i));
523 else
524 check_mask = (0x2UL <<
525 this_busy_nack);
526 if ((dispatch_stat & check_mask) == 0)
527 cpu_clear(i, mask);
528 this_busy_nack += 2;
529 }
530
531 goto retry;
532 }
533 }
534}
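/* Completion handling above: ASI_INTR_DISPATCH_STAT packs one busy/nack
 * bit pair per dispatched target (the busy bits form the 0x5555... mask).
 * If the busy bits never clear we give up and log the stuck mondo; if only
 * NACK bits remain we drop the targets that accepted the cross call from
 * the mask, back off briefly, and retry the dispatch for the NACKers.
 */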
535
536#if 0
537/* Multi-cpu list version. */
538static int init_cpu_list(u16 *list, cpumask_t mask)
539{
540 int i, cnt;
541
542 cnt = 0;
543 for_each_cpu_mask(i, mask)
544 list[cnt++] = i;
545
546 return cnt;
547}
548
549static int update_cpu_list(u16 *list, int orig_cnt, cpumask_t mask)
550{
551 int i;
552
553 for (i = 0; i < orig_cnt; i++) {
554 if (list[i] == 0xffff)
555 cpu_clear(i, mask);
556 }
557
558 return init_cpu_list(list, mask);
559}
560
561static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
562{
563 int this_cpu = get_cpu();
564 struct trap_per_cpu *tb = &trap_block[this_cpu];
565 u64 *mondo = __va(tb->cpu_mondo_block_pa);
566 u16 *cpu_list = __va(tb->cpu_list_pa);
567 int cnt, retries;
568
569 mondo[0] = data0;
570 mondo[1] = data1;
571 mondo[2] = data2;
572 wmb();
573
574 retries = 0;
575 cnt = init_cpu_list(cpu_list, mask);
576 do {
577 register unsigned long func __asm__("%o5");
578 register unsigned long arg0 __asm__("%o0");
579 register unsigned long arg1 __asm__("%o1");
580 register unsigned long arg2 __asm__("%o2");
581
582 func = HV_FAST_CPU_MONDO_SEND;
583 arg0 = cnt;
584 arg1 = tb->cpu_list_pa;
585 arg2 = tb->cpu_mondo_block_pa;
586
587 __asm__ __volatile__("ta %8"
588 : "=&r" (func), "=&r" (arg0),
589 "=&r" (arg1), "=&r" (arg2)
590 : "0" (func), "1" (arg0),
591 "2" (arg1), "3" (arg2),
592 "i" (HV_FAST_TRAP)
593 : "memory");
594 if (likely(arg0 == HV_EOK))
595 break;
596
597 if (unlikely(++retries > 100)) {
598 printk("CPU[%d]: sun4v mondo error %lu\n",
599 this_cpu, func);
600 break;
601 }
602
603 cnt = update_cpu_list(cpu_list, cnt, mask);
604
605 udelay(2 * cnt);
606 } while (1);
607
608 put_cpu();
609}
610#else
611/* Single-cpu list version. */
612static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
613{
614 int this_cpu = get_cpu();
615 struct trap_per_cpu *tb = &trap_block[this_cpu];
616 u64 *mondo = __va(tb->cpu_mondo_block_pa);
617 u16 *cpu_list = __va(tb->cpu_list_pa);
618 int i;
619
620 mondo[0] = data0;
621 mondo[1] = data1;
622 mondo[2] = data2;
623 wmb();
624
625 for_each_cpu_mask(i, mask) {
626 int retries = 0;
627
628 do {
629 register unsigned long func __asm__("%o5");
630 register unsigned long arg0 __asm__("%o0");
631 register unsigned long arg1 __asm__("%o1");
632 register unsigned long arg2 __asm__("%o2");
633
634 cpu_list[0] = i;
635 func = HV_FAST_CPU_MONDO_SEND;
636 arg0 = 1;
637 arg1 = tb->cpu_list_pa;
638 arg2 = tb->cpu_mondo_block_pa;
639
640 __asm__ __volatile__("ta %8"
641 : "=&r" (func), "=&r" (arg0),
642 "=&r" (arg1), "=&r" (arg2)
643 : "0" (func), "1" (arg0),
644 "2" (arg1), "3" (arg2),
645 "i" (HV_FAST_TRAP)
646 : "memory");
647 if (likely(arg0 == HV_EOK))
648 break;
649
650 if (unlikely(++retries > 100)) {
651 printk("CPU[%d]: sun4v mondo error %lu\n",
652 this_cpu, func);
653 break;
654 }
655
656 udelay(2 * i);
657 } while (1);
658 }
659
660 put_cpu();
661}
662#endif
663
664/* Send cross call to all processors mentioned in MASK
665 * except self.
666 */
667static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
668{
669 u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
670 int this_cpu = get_cpu();
671
672 cpus_and(mask, mask, cpu_online_map);
673 cpu_clear(this_cpu, mask);
674
675 if (tlb_type == spitfire)
676 spitfire_xcall_deliver(data0, data1, data2, mask);
677 else if (tlb_type == cheetah || tlb_type == cheetah_plus)
678 cheetah_xcall_deliver(data0, data1, data2, mask);
679 else
680 hypervisor_xcall_deliver(data0, data1, data2, mask);
681 /* NOTE: Caller runs local copy on master. */
682
683 put_cpu();
684}
685
686extern unsigned long xcall_sync_tick;
687
688static void smp_start_sync_tick_client(int cpu)
689{
690 cpumask_t mask = cpumask_of_cpu(cpu);
691
692 smp_cross_call_masked(&xcall_sync_tick,
693 0, 0, 0, mask);
694}
695
696/* Send cross call to all processors except self. */
697#define smp_cross_call(func, ctx, data1, data2) \
698 smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)
699
700struct call_data_struct {
701 void (*func) (void *info);
702 void *info;
703 atomic_t finished;
704 int wait;
705};
706
707static DEFINE_SPINLOCK(call_lock);
708static struct call_data_struct *call_data;
709
710extern unsigned long xcall_call_function;
711
712/*
713 * You must not call this function with disabled interrupts or from a
714 * hardware interrupt handler or from a bottom half handler.
715 */
716static int smp_call_function_mask(void (*func)(void *info), void *info,
717 int nonatomic, int wait, cpumask_t mask)
718{
719 struct call_data_struct data;
720 int cpus = cpus_weight(mask) - 1;
721 long timeout;
722
723 if (!cpus)
724 return 0;
725
726 /* Can deadlock when called with interrupts disabled */
727 WARN_ON(irqs_disabled());
728
729 data.func = func;
730 data.info = info;
731 atomic_set(&data.finished, 0);
732 data.wait = wait;
733
734 spin_lock(&call_lock);
735
736 call_data = &data;
737
738 smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);
739
740 /*
741 * Wait for other cpus to complete function or at
742 * least snap the call data.
743 */
744 timeout = 1000000;
745 while (atomic_read(&data.finished) != cpus) {
746 if (--timeout <= 0)
747 goto out_timeout;
748 barrier();
749 udelay(1);
750 }
751
752 spin_unlock(&call_lock);
753
754 return 0;
755
756out_timeout:
757 spin_unlock(&call_lock);
758 printk("XCALL: Remote cpus not responding, ncpus=%ld finished=%ld\n",
759 (long) num_online_cpus() - 1L,
760 (long) atomic_read(&data.finished));
761 return 0;
762}
763
764int smp_call_function(void (*func)(void *info), void *info,
765 int nonatomic, int wait)
766{
767 return smp_call_function_mask(func, info, nonatomic, wait,
768 cpu_online_map);
769}
770
771void smp_call_function_client(int irq, struct pt_regs *regs)
772{
773 void (*func) (void *info) = call_data->func;
774 void *info = call_data->info;
775
776 clear_softint(1 << irq);
777 if (call_data->wait) {
778 /* let initiator proceed only after completion */
779 func(info);
780 atomic_inc(&call_data->finished);
781 } else {
782 /* let initiator proceed after getting data */
783 atomic_inc(&call_data->finished);
784 func(info);
785 }
786}
787
788static void tsb_sync(void *info)
789{
790 struct mm_struct *mm = info;
791
792 if (current->active_mm == mm)
793 tsb_context_switch(mm);
794}
795
796void smp_tsb_sync(struct mm_struct *mm)
797{
798 smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask);
799}
800
801extern unsigned long xcall_flush_tlb_mm;
802extern unsigned long xcall_flush_tlb_pending;
803extern unsigned long xcall_flush_tlb_kernel_range;
804extern unsigned long xcall_report_regs;
805extern unsigned long xcall_receive_signal;
806
807#ifdef DCACHE_ALIASING_POSSIBLE
808extern unsigned long xcall_flush_dcache_page_cheetah;
809#endif
810extern unsigned long xcall_flush_dcache_page_spitfire;
811
812#ifdef CONFIG_DEBUG_DCFLUSH
813extern atomic_t dcpage_flushes;
814extern atomic_t dcpage_flushes_xcall;
815#endif
816
817static __inline__ void __local_flush_dcache_page(struct page *page)
818{
819#ifdef DCACHE_ALIASING_POSSIBLE
820 __flush_dcache_page(page_address(page),
821 ((tlb_type == spitfire) &&
822 page_mapping(page) != NULL));
823#else
824 if (page_mapping(page) != NULL &&
825 tlb_type == spitfire)
826 __flush_icache_page(__pa(page_address(page)));
827#endif
828}
829
830void smp_flush_dcache_page_impl(struct page *page, int cpu)
831{
832 cpumask_t mask = cpumask_of_cpu(cpu);
833 int this_cpu;
834
835 if (tlb_type == hypervisor)
836 return;
837
838#ifdef CONFIG_DEBUG_DCFLUSH
839 atomic_inc(&dcpage_flushes);
840#endif
841
842 this_cpu = get_cpu();
843
844 if (cpu == this_cpu) {
845 __local_flush_dcache_page(page);
846 } else if (cpu_online(cpu)) {
847 void *pg_addr = page_address(page);
848 u64 data0;
849
850 if (tlb_type == spitfire) {
851 data0 =
852 ((u64)&xcall_flush_dcache_page_spitfire);
853 if (page_mapping(page) != NULL)
854 data0 |= ((u64)1 << 32);
855 spitfire_xcall_deliver(data0,
856 __pa(pg_addr),
857 (u64) pg_addr,
858 mask);
859 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
860#ifdef DCACHE_ALIASING_POSSIBLE
861 data0 =
862 ((u64)&xcall_flush_dcache_page_cheetah);
863 cheetah_xcall_deliver(data0,
864 __pa(pg_addr),
865 0, mask);
866#endif
867 }
868#ifdef CONFIG_DEBUG_DCFLUSH
869 atomic_inc(&dcpage_flushes_xcall);
870#endif
871 }
872
873 put_cpu();
874}
875
876void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
877{
878 void *pg_addr = page_address(page);
879 cpumask_t mask = cpu_online_map;
880 u64 data0;
881 int this_cpu;
882
883 if (tlb_type == hypervisor)
884 return;
885
886 this_cpu = get_cpu();
887
888 cpu_clear(this_cpu, mask);
889
890#ifdef CONFIG_DEBUG_DCFLUSH
891 atomic_inc(&dcpage_flushes);
892#endif
893 if (cpus_empty(mask))
894 goto flush_self;
895 if (tlb_type == spitfire) {
896 data0 = ((u64)&xcall_flush_dcache_page_spitfire);
897 if (page_mapping(page) != NULL)
898 data0 |= ((u64)1 << 32);
899 spitfire_xcall_deliver(data0,
900 __pa(pg_addr),
901 (u64) pg_addr,
902 mask);
903 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
904#ifdef DCACHE_ALIASING_POSSIBLE
905 data0 = ((u64)&xcall_flush_dcache_page_cheetah);
906 cheetah_xcall_deliver(data0,
907 __pa(pg_addr),
908 0, mask);
909#endif
910 }
911#ifdef CONFIG_DEBUG_DCFLUSH
912 atomic_inc(&dcpage_flushes_xcall);
913#endif
914 flush_self:
915 __local_flush_dcache_page(page);
916
917 put_cpu();
918}
919
920void smp_receive_signal(int cpu)
921{
922 cpumask_t mask = cpumask_of_cpu(cpu);
923
924 if (cpu_online(cpu)) {
925 u64 data0 = (((u64)&xcall_receive_signal) & 0xffffffff);
926
927 if (tlb_type == spitfire)
928 spitfire_xcall_deliver(data0, 0, 0, mask);
929 else if (tlb_type == cheetah || tlb_type == cheetah_plus)
930 cheetah_xcall_deliver(data0, 0, 0, mask);
931 else if (tlb_type == hypervisor)
932 hypervisor_xcall_deliver(data0, 0, 0, mask);
933 }
934}
935
936void smp_receive_signal_client(int irq, struct pt_regs *regs)
937{
938 /* Just return, rtrap takes care of the rest. */
939 clear_softint(1 << irq);
940}
941
942void smp_report_regs(void)
943{
944 smp_cross_call(&xcall_report_regs, 0, 0, 0);
945}
946
947/* We know that the window frames of the user have been flushed
948 * to the stack before we get here because all callers of us
949 * are flush_tlb_*() routines, and these run after flush_cache_*()
950 * which performs the flushw.
951 *
952 * The SMP TLB coherency scheme we use works as follows:
953 *
954 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
955 * space has (potentially) executed on, this is the heuristic
956 * we use to avoid doing cross calls.
957 *
958 * Also, for flushing from kswapd and also for clones, we
959 * use cpu_vm_mask as the list of cpus to make run the TLB.
960 *
961 * 2) TLB context numbers are shared globally across all processors
962 * in the system, this allows us to play several games to avoid
963 * cross calls.
964 *
965 * One invariant is that when a cpu switches to a process, and
 966 * that process's tsk->active_mm->cpu_vm_mask does not have the
967 * current cpu's bit set, that tlb context is flushed locally.
968 *
969 * If the address space is non-shared (ie. mm->count == 1) we avoid
970 * cross calls when we want to flush the currently running process's
971 * tlb state. This is done by clearing all cpu bits except the current
972 * processor's in current->active_mm->cpu_vm_mask and performing the
973 * flush locally only. This will force any subsequent cpus which run
974 * this task to flush the context from the local tlb if the process
975 * migrates to another cpu (again).
976 *
977 * 3) For shared address spaces (threads) and swapping we bite the
978 * bullet for most cases and perform the cross call (but only to
979 * the cpus listed in cpu_vm_mask).
980 *
981 * The performance gain from "optimizing" away the cross call for threads is
982 * questionable (in theory the big win for threads is the massive sharing of
983 * address space state across processors).
984 */
985
986/* This currently is only used by the hugetlb arch pre-fault
987 * hook on UltraSPARC-III+ and later when changing the pagesize
988 * bits of the context register for an address space.
989 */
990void smp_flush_tlb_mm(struct mm_struct *mm)
991{
992 u32 ctx = CTX_HWBITS(mm->context);
993 int cpu = get_cpu();
994
995 if (atomic_read(&mm->mm_users) == 1) {
996 mm->cpu_vm_mask = cpumask_of_cpu(cpu);
997 goto local_flush_and_out;
998 }
999
1000 smp_cross_call_masked(&xcall_flush_tlb_mm,
1001 ctx, 0, 0,
1002 mm->cpu_vm_mask);
1003
1004local_flush_and_out:
1005 __flush_tlb_mm(ctx, SECONDARY_CONTEXT);
1006
1007 put_cpu();
1008}
1009
1010void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
1011{
1012 u32 ctx = CTX_HWBITS(mm->context);
1013 int cpu = get_cpu();
1014
1015 if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
1016 mm->cpu_vm_mask = cpumask_of_cpu(cpu);
1017 else
1018 smp_cross_call_masked(&xcall_flush_tlb_pending,
1019 ctx, nr, (unsigned long) vaddrs,
1020 mm->cpu_vm_mask);
1021
1022 __flush_tlb_pending(ctx, nr, vaddrs);
1023
1024 put_cpu();
1025}
1026
1027void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
1028{
1029 start &= PAGE_MASK;
1030 end = PAGE_ALIGN(end);
1031 if (start != end) {
1032 smp_cross_call(&xcall_flush_tlb_kernel_range,
1033 0, start, end);
1034
1035 __flush_tlb_kernel_range(start, end);
1036 }
1037}
1038
1039/* CPU capture. */
1040/* #define CAPTURE_DEBUG */
1041extern unsigned long xcall_capture;
1042
1043static atomic_t smp_capture_depth = ATOMIC_INIT(0);
1044static atomic_t smp_capture_registry = ATOMIC_INIT(0);
1045static unsigned long penguins_are_doing_time;
1046
1047void smp_capture(void)
1048{
1049 int result = atomic_add_ret(1, &smp_capture_depth);
1050
1051 if (result == 1) {
1052 int ncpus = num_online_cpus();
1053
1054#ifdef CAPTURE_DEBUG
1055 printk("CPU[%d]: Sending penguins to jail...",
1056 smp_processor_id());
1057#endif
1058 penguins_are_doing_time = 1;
1059 membar_storestore_loadstore();
1060 atomic_inc(&smp_capture_registry);
1061 smp_cross_call(&xcall_capture, 0, 0, 0);
1062 while (atomic_read(&smp_capture_registry) != ncpus)
1063 rmb();
1064#ifdef CAPTURE_DEBUG
1065 printk("done\n");
1066#endif
1067 }
1068}
1069
1070void smp_release(void)
1071{
1072 if (atomic_dec_and_test(&smp_capture_depth)) {
1073#ifdef CAPTURE_DEBUG
1074 printk("CPU[%d]: Giving pardon to "
1075 "imprisoned penguins\n",
1076 smp_processor_id());
1077#endif
1078 penguins_are_doing_time = 0;
1079 membar_storeload_storestore();
1080 atomic_dec(&smp_capture_registry);
1081 }
1082}
1083
1084/* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
1085 * can service tlb flush xcalls...
1086 */
1087extern void prom_world(int);
1088
1089void smp_penguin_jailcell(int irq, struct pt_regs *regs)
1090{
1091 clear_softint(1 << irq);
1092
1093 preempt_disable();
1094
1095 __asm__ __volatile__("flushw");
1096 prom_world(1);
1097 atomic_inc(&smp_capture_registry);
1098 membar_storeload_storestore();
1099 while (penguins_are_doing_time)
1100 rmb();
1101 atomic_dec(&smp_capture_registry);
1102 prom_world(0);
1103
1104 preempt_enable();
1105}
1106
1107#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier
1108#define prof_counter(__cpu) cpu_data(__cpu).counter
1109
1110void smp_percpu_timer_interrupt(struct pt_regs *regs)
1111{
1112 unsigned long compare, tick, pstate;
1113 int cpu = smp_processor_id();
1114 int user = user_mode(regs);
1115
1116 /*
1117 * Check for level 14 softint.
1118 */
1119 {
1120 unsigned long tick_mask = tick_ops->softint_mask;
1121
1122 if (!(get_softint() & tick_mask)) {
1123 extern void handler_irq(int, struct pt_regs *);
1124
1125 handler_irq(14, regs);
1126 return;
1127 }
1128 clear_softint(tick_mask);
1129 }
1130
1131 do {
1132 profile_tick(CPU_PROFILING, regs);
1133 if (!--prof_counter(cpu)) {
1134 irq_enter();
1135
1136 if (cpu == boot_cpu_id) {
1137 kstat_this_cpu.irqs[0]++;
1138 timer_tick_interrupt(regs);
1139 }
1140
1141 update_process_times(user);
1142
1143 irq_exit();
1144
1145 prof_counter(cpu) = prof_multiplier(cpu);
1146 }
1147
1148 /* Guarantee that the following sequences execute
1149 * uninterrupted.
1150 */
1151 __asm__ __volatile__("rdpr %%pstate, %0\n\t"
1152 "wrpr %0, %1, %%pstate"
1153 : "=r" (pstate)
1154 : "i" (PSTATE_IE));
1155
1156 compare = tick_ops->add_compare(current_tick_offset);
1157 tick = tick_ops->get_tick();
1158
1159 /* Restore PSTATE_IE. */
1160 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
1161 : /* no outputs */
1162 : "r" (pstate));
1163 } while (time_after_eq(tick, compare));
1164}
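/* The do/while above re-arms the tick compare register with interrupts
 * disabled and then re-reads %tick; if the handler ran long enough that the
 * new compare value is already in the past (time_after_eq), the body runs
 * again immediately instead of waiting for a compare interrupt that would
 * never fire.
 */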
1165
1166static void __init smp_setup_percpu_timer(void)
1167{
1168 int cpu = smp_processor_id();
1169 unsigned long pstate;
1170
1171 prof_counter(cpu) = prof_multiplier(cpu) = 1;
1172
1173 /* Guarantee that the following sequences execute
1174 * uninterrupted.
1175 */
1176 __asm__ __volatile__("rdpr %%pstate, %0\n\t"
1177 "wrpr %0, %1, %%pstate"
1178 : "=r" (pstate)
1179 : "i" (PSTATE_IE));
1180
1181 tick_ops->init_tick(current_tick_offset);
1182
1183 /* Restore PSTATE_IE. */
1184 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
1185 : /* no outputs */
1186 : "r" (pstate));
1187}
1188
1189void __init smp_tick_init(void)
1190{
1191 boot_cpu_id = hard_smp_processor_id();
1192 current_tick_offset = timer_tick_offset;
1193
1194 cpu_set(boot_cpu_id, cpu_online_map);
1195 prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
1196}
1197
1198/* /proc/profile writes can call this, don't __init it please. */
1199static DEFINE_SPINLOCK(prof_setup_lock);
1200
1201int setup_profiling_timer(unsigned int multiplier)
1202{
1203 unsigned long flags;
1204 int i;
1205
1206 if ((!multiplier) || (timer_tick_offset / multiplier) < 1000)
1207 return -EINVAL;
1208
1209 spin_lock_irqsave(&prof_setup_lock, flags);
1210 for (i = 0; i < NR_CPUS; i++)
1211 prof_multiplier(i) = multiplier;
1212 current_tick_offset = (timer_tick_offset / multiplier);
1213 spin_unlock_irqrestore(&prof_setup_lock, flags);
1214
1215 return 0;
1216}
1217
1218/* Constrain the number of cpus to max_cpus. */
1219void __init smp_prepare_cpus(unsigned int max_cpus)
1220{
1221 if (num_possible_cpus() > max_cpus) {
1222 int instance, mid;
1223
1224 instance = 0;
1225 while (!cpu_find_by_instance(instance, NULL, &mid)) {
1226 if (mid != boot_cpu_id) {
1227 cpu_clear(mid, phys_cpu_present_map);
1228 if (num_possible_cpus() <= max_cpus)
1229 break;
1230 }
1231 instance++;
1232 }
1233 }
1234
1235 smp_store_cpu_info(boot_cpu_id);
1236}
1237
1238/* Set this up early so that things like the scheduler can init
1239 * properly. We use the same cpu mask for both the present and
1240 * possible cpu map.
1241 */
1242void __init smp_setup_cpu_possible_map(void)
1243{
1244 int instance, mid;
1245
1246 instance = 0;
1247 while (!cpu_find_by_instance(instance, NULL, &mid)) {
1248 if (mid < NR_CPUS)
1249 cpu_set(mid, phys_cpu_present_map);
1250 instance++;
1251 }
1252}
1253
1254void __devinit smp_prepare_boot_cpu(void)
1255{
1256 int cpu = hard_smp_processor_id();
1257
1258 if (cpu >= NR_CPUS) {
1259 prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
1260 prom_halt();
1261 }
1262
1263 current_thread_info()->cpu = cpu;
1264 __local_per_cpu_offset = __per_cpu_offset(cpu);
1265
1266 cpu_set(smp_processor_id(), cpu_online_map);
1267 cpu_set(smp_processor_id(), phys_cpu_present_map);
1268}
1269
1270int __devinit __cpu_up(unsigned int cpu)
1271{
1272 int ret = smp_boot_one_cpu(cpu);
1273
1274 if (!ret) {
1275 cpu_set(cpu, smp_commenced_mask);
1276 while (!cpu_isset(cpu, cpu_online_map))
1277 mb();
1278 if (!cpu_isset(cpu, cpu_online_map)) {
1279 ret = -ENODEV;
1280 } else {
1281 smp_synchronize_one_tick(cpu);
1282 }
1283 }
1284 return ret;
1285}
1286
1287void __init smp_cpus_done(unsigned int max_cpus)
1288{
1289 unsigned long bogosum = 0;
1290 int i;
1291
1292 for (i = 0; i < NR_CPUS; i++) {
1293 if (cpu_online(i))
1294 bogosum += cpu_data(i).udelay_val;
1295 }
1296 printk("Total of %ld processors activated "
1297 "(%lu.%02lu BogoMIPS).\n",
1298 (long) num_online_cpus(),
1299 bogosum/(500000/HZ),
1300 (bogosum/(5000/HZ))%100);
1301}
1302
1303void smp_send_reschedule(int cpu)
1304{
1305 smp_receive_signal(cpu);
1306}
1307
1308/* This is a nop because we capture all other cpus
1309 * anyways when making the PROM active.
1310 */
1311void smp_send_stop(void)
1312{
1313}
1314
1315unsigned long __per_cpu_base __read_mostly;
1316unsigned long __per_cpu_shift __read_mostly;
1317
1318EXPORT_SYMBOL(__per_cpu_base);
1319EXPORT_SYMBOL(__per_cpu_shift);
1320
1321void __init setup_per_cpu_areas(void)
1322{
1323 unsigned long goal, size, i;
1324 char *ptr;
1325
1326 /* Copy section for each CPU (we discard the original) */
1327 goal = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
1328#ifdef CONFIG_MODULES
1329 if (goal < PERCPU_ENOUGH_ROOM)
1330 goal = PERCPU_ENOUGH_ROOM;
1331#endif
1332 __per_cpu_shift = 0;
1333 for (size = 1UL; size < goal; size <<= 1UL)
1334 __per_cpu_shift++;
1335
1336 ptr = alloc_bootmem(size * NR_CPUS);
1337
1338 __per_cpu_base = ptr - __per_cpu_start;
1339
1340 for (i = 0; i < NR_CPUS; i++, ptr += size)
1341 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
1342}