[SPARC64]: Provide mmu statistics via sysfs.
arch/sparc64/kernel/smp.c
/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/bootmem.h>

#include <asm/head.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/timer.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/mdesc.h>

extern void calibrate_delay(void);

/* Please don't make this stuff initdata!!!  --DaveM */
unsigned char boot_cpu_id;

cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly =
	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };

static cpumask_t smp_commenced_mask;
static cpumask_t cpu_callout_map;

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d:\t\tonline\n", i);
}

void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i)
		seq_printf(m,
			   "Cpu%dBogo\t: %lu.%02lu\n"
			   "Cpu%dClkTck\t: %016lx\n",
			   i, cpu_data(i).udelay_val / (500000/HZ),
			   (cpu_data(i).udelay_val / (5000/HZ)) % 100,
			   i, cpu_data(i).clock_tick);
}

extern void setup_sparc64_timer(void);

static volatile unsigned long callin_flag = 0;

void __init smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	__local_per_cpu_offset = __per_cpu_offset(cpuid);

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	__flush_tlb_all();

	setup_sparc64_timer();

	if (cheetah_pcache_forced_on)
		cheetah_enable_pcache();

	local_irq_enable();

	calibrate_delay();
	cpu_data(cpuid).udelay_val = loops_per_jiffy;
	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	current_thread_info()->new_child = 0;

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	while (!cpu_isset(cpuid, smp_commenced_mask))
		rmb();

	cpu_set(cpuid, cpu_online_map);

	/* idle thread is expected to have preempt disabled */
	preempt_disable();
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave.  -DaveM
 */

#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0

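/* Take NUM_ITERS samples of the master/slave tick offset and keep the
 * pair with the smallest round-trip time, which bounds the error.
 * Returns the signed offset between the midpoint of the best local
 * (t0, t1) pair and the master's timestamp; *rt and *master report
 * the round-trip and master deltas for the caller.
 */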
static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		go[MASTER] = 1;
		membar_storeload();
		while (!(tm = go[SLAVE]))
			rmb();
		go[SLAVE] = 0;
		wmb();
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}

void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	go[MASTER] = 1;

	while (go[MASTER])
		rmb();

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS; i++) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0) {
				done = 1;	/* let's lock on to this... */
				bound = rt;
			}

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				tick_ops->add_tick(adj);
			}
#if DEBUG_TICK_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU (last diff %ld cycles, "
	       "maxerr %lu cycles)\n", smp_processor_id(), delta, rt);
}

static void smp_start_sync_tick_client(int cpu);

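/* Master side of the tick sync: kick the slave cpu via xcall, then
 * feed it NUM_ROUNDS * NUM_ITERS tick samples through the go[] mailbox
 * while holding itc_sync_lock so only one cpu pair syncs at a time.
 */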
static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		rmb();

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar_storeload();

	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			while (!go[MASTER])
				rmb();
			go[MASTER] = 0;
			wmb();
			go[SLAVE] = tick_ops->get_tick();
			membar_storeload();
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);
}

extern void sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load);

extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;

static int __devinit smp_boot_one_cpu(unsigned int cpu)
{
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	struct task_struct *p;
	int timeout, ret;

	p = fork_idle(cpu);
	callin_flag = 0;
	cpu_new_thread = task_thread_info(p);
	cpu_set(cpu, cpu_callout_map);

	if (tlb_type == hypervisor) {
		/* Alloc the mondo queues, cpu will load them. */
		sun4v_init_mondo_queues(0, cpu, 1, 0);

		prom_startcpu_cpuid(cpu, entry, cookie);
	} else {
		struct device_node *dp = of_find_node_by_cpuid(cpu);

		prom_startcpu(dp->node, entry, cookie);
	}

	for (timeout = 0; timeout < 5000000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}

	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		cpu_clear(cpu, cpu_callout_map);
		ret = -ENODEV;
	}
	cpu_new_thread = NULL;

	return ret;
}

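/* Deliver a three-word cross-call packet to one cpu via the UDB
 * interrupt dispatch registers, polling the dispatch status and
 * retrying after a short delay for as long as the target NACKs us.
 */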
static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
			((cpu & 0x40) >> 4) |
			(cpu & 0x3));
	}

	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr	%1, %2, %%pstate\n\t"
	"stxa	%4, [%0] %3\n\t"
	"stxa	%5, [%0+%8] %3\n\t"
	"add	%0, %8, %0\n\t"
	"stxa	%6, [%0+%8] %3\n\t"
	"membar	#Sync\n\t"
	"stxa	%%g0, [%7] %3\n\t"
	"membar	#Sync\n\t"
	"mov	0x20, %%g1\n\t"
	"ldxa	[%%g1] 0x7f, %%g0\n\t"
	"membar	#Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
	: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
			: "=r" (result)
			: "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016lx]\n",
		       smp_processor_id(), result);
	} else {
		udelay(2);
		goto again;
	}
}

static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	for_each_cpu_mask(i, mask)
		spitfire_xcall_helper(data0, data1, data2, pstate, i);
}

/* Cheetah now allows sending the whole 64 bytes of data in the interrupt
 * packet, but we have no use for that.  However we do take advantage of
 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
 */
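/* Note that the dispatch status register only tracks 32 busy/nack
 * slots, so masks with more than 32 cpus are delivered in chunks of
 * 32 via the need_more/retry logic below.
 */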
static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	u64 pstate, ver;
	int nack_busy_id, is_jbus, need_more;

	if (cpus_empty(mask))
		return;

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
		   (ver >> 32) == __SERRANO_ID);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	need_more = 0;
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa %0, [%3] %6\n\t"
			     "stxa %1, [%4] %6\n\t"
			     "stxa %2, [%5] %6\n\t"
			     "membar #Sync\n\t"
			     : /* no outputs */
			     : "r" (data0), "r" (data1), "r" (data2),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	{
		int i;

		for_each_cpu_mask(i, mask) {
			u64 target = (i << 14) | 0x70;

			if (!is_jbus)
				target |= (nack_busy_id << 24);
			__asm__ __volatile__(
				"stxa	%%g0, [%0] %1\n\t"
				"membar	#Sync\n\t"
				: /* no outputs */
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
			if (nack_busy_id == 32) {
				need_more = 1;
				break;
			}
		}
	}

	/* Now, poll for completion. */
	{
		u64 dispatch_stat;
		long stuck;

		stuck = 100000 * nack_busy_id;
		do {
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (dispatch_stat == 0UL) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				if (unlikely(need_more)) {
					int i, cnt = 0;
					for_each_cpu_mask(i, mask) {
						cpu_clear(i, mask);
						cnt++;
						if (cnt == 32)
							break;
					}
					goto retry;
				}
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & 0x5555555555555555UL);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));

		if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016lx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for_each_cpu_mask(i, mask) {
				u64 check_mask;

				if (is_jbus)
					check_mask = (0x2UL << (2*i));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_clear(i, mask);
				this_busy_nack += 2;
				if (this_busy_nack == 64)
					break;
			}

			goto retry;
		}
	}
}

/* Multi-cpu list version. */
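/* The sun4v hypervisor takes a physical list of cpu numbers and marks
 * each successfully-contacted entry with 0xffff, so a partial send can
 * be retried with the same list until every entry reads 0xffff.
 */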
static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	struct trap_per_cpu *tb;
	u16 *cpu_list;
	u64 *mondo;
	cpumask_t error_mask;
	unsigned long flags, status;
	int cnt, retries, this_cpu, prev_sent, i;

	if (cpus_empty(mask))
		return;

	/* We have to do this whole thing with interrupts fully disabled.
	 * Otherwise if we send an xcall from interrupt context it will
	 * corrupt both our mondo block and cpu list state.
	 *
	 * One consequence of this is that we cannot use timeout mechanisms
	 * that depend upon interrupts being delivered locally.  So, for
	 * example, we cannot sample jiffies and expect it to advance.
	 *
	 * Fortunately, udelay() uses %stick/%tick so we can use that.
	 */
	local_irq_save(flags);

	this_cpu = smp_processor_id();
	tb = &trap_block[this_cpu];

	mondo = __va(tb->cpu_mondo_block_pa);
	mondo[0] = data0;
	mondo[1] = data1;
	mondo[2] = data2;
	wmb();

	cpu_list = __va(tb->cpu_list_pa);

	/* Setup the initial cpu list.  */
	cnt = 0;
	for_each_cpu_mask(i, mask)
		cpu_list[cnt++] = i;

	cpus_clear(error_mask);
	retries = 0;
	prev_sent = 0;
	do {
		int forward_progress, n_sent;

		status = sun4v_cpu_mondo_send(cnt,
					      tb->cpu_list_pa,
					      tb->cpu_mondo_block_pa);

		/* HV_EOK means all cpus received the xcall, we're done.  */
		if (likely(status == HV_EOK))
			break;

		/* First, see if we made any forward progress.
		 *
		 * The hypervisor indicates successful sends by setting
		 * cpu list entries to the value 0xffff.
		 */
		n_sent = 0;
		for (i = 0; i < cnt; i++) {
			if (likely(cpu_list[i] == 0xffff))
				n_sent++;
		}

		forward_progress = 0;
		if (n_sent > prev_sent)
			forward_progress = 1;

		prev_sent = n_sent;

		/* If we get a HV_ECPUERROR, then one or more of the cpus
		 * in the list are in error state.  Use the cpu_state()
		 * hypervisor call to find out which cpus are in error state.
		 */
		if (unlikely(status == HV_ECPUERROR)) {
			for (i = 0; i < cnt; i++) {
				long err;
				u16 cpu;

				cpu = cpu_list[i];
				if (cpu == 0xffff)
					continue;

				err = sun4v_cpu_state(cpu);
				if (err >= 0 &&
				    err == HV_CPU_STATE_ERROR) {
					cpu_list[i] = 0xffff;
					cpu_set(cpu, error_mask);
				}
			}
		} else if (unlikely(status != HV_EWOULDBLOCK))
			goto fatal_mondo_error;

		/* Don't bother rewriting the CPU list, just leave the
		 * 0xffff and non-0xffff entries in there and the
		 * hypervisor will do the right thing.
		 *
		 * Only advance timeout state if we didn't make any
		 * forward progress.
		 */
		if (unlikely(!forward_progress)) {
			if (unlikely(++retries > 10000))
				goto fatal_mondo_timeout;

			/* Delay a little bit to let other cpus catch up
			 * on their cpu mondo queue work.
			 */
			udelay(2 * cnt);
		}
	} while (1);

	local_irq_restore(flags);

	if (unlikely(!cpus_empty(error_mask)))
		goto fatal_mondo_cpu_error;

	return;

fatal_mondo_cpu_error:
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
	       "were in error state\n",
	       this_cpu);
	printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu);
	for_each_cpu_mask(i, error_mask)
		printk("%d ", i);
	printk("]\n");
	return;

fatal_mondo_timeout:
	local_irq_restore(flags);
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
	       "progress after %d retries.\n",
	       this_cpu, retries);
	goto dump_cpu_list_and_out;

fatal_mondo_error:
	local_irq_restore(flags);
	printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
	       this_cpu, status);
	printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
	       "mondo_block_pa(%lx)\n",
	       this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);

dump_cpu_list_and_out:
	printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
	for (i = 0; i < cnt; i++)
		printk("%u ", cpu_list[i]);
	printk("]\n");
}

/* Send cross call to all processors mentioned in MASK
 * except self.
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
{
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
	int this_cpu = get_cpu();

	cpus_and(mask, mask, cpu_online_map);
	cpu_clear(this_cpu, mask);

	if (tlb_type == spitfire)
		spitfire_xcall_deliver(data0, data1, data2, mask);
	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_xcall_deliver(data0, data1, data2, mask);
	else
		hypervisor_xcall_deliver(data0, data1, data2, mask);
	/* NOTE: Caller runs local copy on master. */

	put_cpu();
}

extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	smp_cross_call_masked(&xcall_sync_tick,
			      0, 0, 0, mask);
}

/* Send cross call to all processors except self. */
#define smp_cross_call(func, ctx, data1, data2) \
	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t finished;
	int wait;
};

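/* Cross-call function protocol: the initiator publishes its request
 * in call_data under call_lock, sends the xcall, then waits for every
 * target to bump data->finished from smp_call_function_client().
 */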
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
static struct call_data_struct *call_data;

extern unsigned long xcall_call_function;

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: currently unused.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
static int smp_call_function_mask(void (*func)(void *info), void *info,
				  int nonatomic, int wait, cpumask_t mask)
{
	struct call_data_struct data;
	int cpus;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.finished, 0);
	data.wait = wait;

	spin_lock(&call_lock);

	cpu_clear(smp_processor_id(), mask);
	cpus = cpus_weight(mask);
	if (!cpus)
		goto out_unlock;

	call_data = &data;
	mb();

	smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);

	/* Wait for response */
	while (atomic_read(&data.finished) != cpus)
		cpu_relax();

out_unlock:
	spin_unlock(&call_lock);

	return 0;
}

int smp_call_function(void (*func)(void *info), void *info,
		      int nonatomic, int wait)
{
	return smp_call_function_mask(func, info, nonatomic, wait,
				      cpu_online_map);
}

void smp_call_function_client(int irq, struct pt_regs *regs)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;

	clear_softint(1 << irq);
	if (call_data->wait) {
		/* let initiator proceed only after completion */
		func(info);
		atomic_inc(&call_data->finished);
	} else {
		/* let initiator proceed after getting data */
		atomic_inc(&call_data->finished);
		func(info);
	}
}

static void tsb_sync(void *info)
{
	struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
	struct mm_struct *mm = info;

	/* It is not valid to test "current->active_mm == mm" here.
	 *
	 * The value of "current" is not changed atomically with
	 * switch_mm().  But that's OK, we just need to check the
	 * current cpu's trap block PGD physical address.
	 */
	if (tp->pgd_paddr == __pa(mm->pgd))
		tsb_context_switch(mm);
}

void smp_tsb_sync(struct mm_struct *mm)
{
	smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask);
}

extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_pending;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_report_regs;
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_new_mmu_context_version;

#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

#ifdef CONFIG_DEBUG_DCFLUSH
extern atomic_t dcpage_flushes;
extern atomic_t dcpage_flushes_xcall;
#endif

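/* Flush one page out of the local D-cache.  On spitfire chips, pages
 * with a mapping also get their I-cache lines flushed, since those
 * cpus presumably do not keep the I-cache coherent with local stores.
 */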
static __inline__ void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

	this_cpu = get_cpu();

	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0;

		if (tlb_type == spitfire) {
			data0 =
				((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping(page) != NULL)
				data0 |= ((u64)1 << 32);
			spitfire_xcall_deliver(data0,
					       __pa(pg_addr),
					       (u64) pg_addr,
					       mask);
		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
			data0 =
				((u64)&xcall_flush_dcache_page_cheetah);
			cheetah_xcall_deliver(data0,
					      __pa(pg_addr),
					      0, mask);
#endif
		}
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}

	put_cpu();
}

void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	void *pg_addr = page_address(page);
	cpumask_t mask = cpu_online_map;
	u64 data0;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	this_cpu = get_cpu();

	cpu_clear(this_cpu, mask);

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	if (cpus_empty(mask))
		goto flush_self;
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		if (page_mapping(page) != NULL)
			data0 |= ((u64)1 << 32);
		spitfire_xcall_deliver(data0,
				       __pa(pg_addr),
				       (u64) pg_addr,
				       mask);
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
		cheetah_xcall_deliver(data0,
				      __pa(pg_addr),
				      0, mask);
#endif
	}
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes_xcall);
#endif
 flush_self:
	__local_flush_dcache_page(page);

	put_cpu();
}

static void __smp_receive_signal_mask(cpumask_t mask)
{
	smp_cross_call_masked(&xcall_receive_signal, 0, 0, 0, mask);
}

void smp_receive_signal(int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	if (cpu_online(cpu))
		__smp_receive_signal_mask(mask);
}

void smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
}

void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
{
	struct mm_struct *mm;
	unsigned long flags;

	clear_softint(1 << irq);

	/* See if we need to allocate a new TLB context because
	 * the version of the one we are using is now out of date.
	 */
	mm = current->active_mm;
	if (unlikely(!mm || (mm == &init_mm)))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (unlikely(!CTX_VALID(mm->context)))
		get_new_mmu_context(mm);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	load_secondary_context(mm);
	__flush_tlb_mm(CTX_HWBITS(mm->context),
		       SECONDARY_CONTEXT);
}

void smp_new_mmu_context_version(void)
{
	smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
}

void smp_report_regs(void)
{
	smp_cross_call(&xcall_report_regs, 0, 0, 0);
}

/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * The SMP TLB coherency scheme we use works as follows:
 *
 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
 *    space has (potentially) executed on, this is the heuristic
 *    we use to avoid doing cross calls.
 *
 *    Also, for flushing from kswapd and also for clones, we
 *    use cpu_vm_mask as the list of cpus to make run the TLB.
 *
 * 2) TLB context numbers are shared globally across all processors
 *    in the system, this allows us to play several games to avoid
 *    cross calls.
 *
 *    One invariant is that when a cpu switches to a process, and
 *    that process's tsk->active_mm->cpu_vm_mask does not have the
 *    current cpu's bit set, that tlb context is flushed locally.
 *
 *    If the address space is non-shared (ie. mm->count == 1) we avoid
 *    cross calls when we want to flush the currently running process's
 *    tlb state.  This is done by clearing all cpu bits except the current
 *    processor's in current->active_mm->cpu_vm_mask and performing the
 *    flush locally only.  This will force any subsequent cpus which run
 *    this task to flush the context from the local tlb if the process
 *    migrates to another cpu (again).
 *
 * 3) For shared address spaces (threads) and swapping we bite the
 *    bullet for most cases and perform the cross call (but only to
 *    the cpus listed in cpu_vm_mask).
 *
 * The performance gain from "optimizing" away the cross call for threads is
 * questionable (in theory the big win for threads is the massive sharing of
 * address space state across processors).
 */

/* This currently is only used by the hugetlb arch pre-fault
 * hook on UltraSPARC-III+ and later when changing the pagesize
 * bits of the context register for an address space.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (atomic_read(&mm->mm_users) == 1) {
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
		goto local_flush_and_out;
	}

	smp_cross_call_masked(&xcall_flush_tlb_mm,
			      ctx, 0, 0,
			      mm->cpu_vm_mask);

local_flush_and_out:
	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

	put_cpu();
}

void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
	else
		smp_cross_call_masked(&xcall_flush_tlb_pending,
				      ctx, nr, (unsigned long) vaddrs,
				      mm->cpu_vm_mask);

	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	end    = PAGE_ALIGN(end);
	if (start != end) {
		smp_cross_call(&xcall_flush_tlb_kernel_range,
			       0, start, end);

		__flush_tlb_kernel_range(start, end);
	}
}

/* CPU capture. */
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;

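/* Capture protocol: the first (outermost) smp_capture() call raises
 * penguins_are_doing_time and cross-calls every other cpu into
 * smp_penguin_jailcell(), where they spin until the matching
 * smp_release() drops the flag again.
 */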
void smp_capture(void)
{
	int result = atomic_add_ret(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		membar_storestore_loadstore();
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			rmb();
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}

void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar_storeload_storestore();
		atomic_dec(&smp_capture_registry);
	}
}

/* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
 * can service tlb flush xcalls...
 */
extern void prom_world(int);

void smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar_storeload_storestore();
	while (penguins_are_doing_time)
		rmb();
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}

void __init smp_tick_init(void)
{
	boot_cpu_id = hard_smp_processor_id();
}

/* /proc/profile writes can call this, don't __init it please. */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

static void __init smp_tune_scheduling(void)
{
	unsigned int smallest = ~0U;
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		unsigned int val = cpu_data(i).ecache_size;

		if (val && val < smallest)
			smallest = val;
	}

	/* Any value less than 256K is nonsense.  */
	if (smallest < (256U * 1024U))
		smallest = 256 * 1024;

	max_cache_size = smallest;

	if (smallest < 1U * 1024U * 1024U)
		printk(KERN_INFO "Using max_cache_size of %uKB\n",
		       smallest / 1024U);
	else
		printk(KERN_INFO "Using max_cache_size of %uMB\n",
		       smallest / 1024U / 1024U);
}

/* Constrain the number of cpus to max_cpus. */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	if (num_possible_cpus() > max_cpus) {
		for_each_possible_cpu(i) {
			if (i != boot_cpu_id) {
				cpu_clear(i, phys_cpu_present_map);
				cpu_clear(i, cpu_present_map);
				if (num_possible_cpus() <= max_cpus)
					break;
			}
		}
	}

	cpu_data(boot_cpu_id).udelay_val = loops_per_jiffy;
	smp_tune_scheduling();
}

void __devinit smp_prepare_boot_cpu(void)
{
}

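/* Build cpu_sibling_map[] by grouping cpus that share a core_id.
 * A core_id of zero means no topology information was available,
 * so such a cpu is left as its own only sibling.
 */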
void __devinit smp_fill_in_sib_core_maps(void)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		unsigned int j;

		if (cpu_data(i).core_id == 0) {
			cpu_set(i, cpu_sibling_map[i]);
			continue;
		}

		for_each_possible_cpu(j) {
			if (cpu_data(i).core_id ==
			    cpu_data(j).core_id)
				cpu_set(j, cpu_sibling_map[i]);
		}
	}
}

int __cpuinit __cpu_up(unsigned int cpu)
{
	int ret = smp_boot_one_cpu(cpu);

	if (!ret) {
		cpu_set(cpu, smp_commenced_mask);
		while (!cpu_isset(cpu, cpu_online_map))
			mb();
		if (!cpu_isset(cpu, cpu_online_map)) {
			ret = -ENODEV;
		} else {
			/* On SUN4V, writes to %tick and %stick are
			 * not allowed.
			 */
			if (tlb_type != hypervisor)
				smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int i;

	for_each_online_cpu(i)
		bogosum += cpu_data(i).udelay_val;
	printk("Total of %ld processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       (long) num_online_cpus(),
	       bogosum/(500000/HZ),
	       (bogosum/(5000/HZ))%100);
}

void smp_send_reschedule(int cpu)
{
	smp_receive_signal(cpu);
}

/* This is a nop because we capture all other cpus
 * anyways when making the PROM active.
 */
void smp_send_stop(void)
{
}

unsigned long __per_cpu_base __read_mostly;
unsigned long __per_cpu_shift __read_mostly;

EXPORT_SYMBOL(__per_cpu_base);
EXPORT_SYMBOL(__per_cpu_shift);

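/* The per-cpu area is sized to a power-of-two number of pages so
 * that a cpu's offset can be computed with a simple shift
 * (__per_cpu_shift) instead of a multiply.
 */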
void __init real_setup_per_cpu_areas(void)
{
	unsigned long goal, size, i;
	char *ptr;

	/* Copy section for each CPU (we discard the original) */
	goal = PERCPU_ENOUGH_ROOM;

	__per_cpu_shift = PAGE_SHIFT;
	for (size = PAGE_SIZE; size < goal; size <<= 1UL)
		__per_cpu_shift++;

	ptr = alloc_bootmem_pages(size * NR_CPUS);

	__per_cpu_base = ptr - __per_cpu_start;

	for (i = 0; i < NR_CPUS; i++, ptr += size)
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);

	/* Setup %g5 for the boot cpu.  */
	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
}