/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/bootmem.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>

#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/timer.h>
#include <asm/starfire.h>
#include <asm/sections.h>
extern void calibrate_delay(void);

/* Please don't make this stuff initdata!!!  --DaveM */
static unsigned char boot_cpu_id;

cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
static cpumask_t smp_commenced_mask;
static cpumask_t cpu_callout_map;
void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_online(i))
			seq_printf(m,
				   "CPU%d:\t\tonline\n", i);
	}
}
void smp_bogo(struct seq_file *m)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		if (cpu_online(i))
			seq_printf(m,
				   "Cpu%dBogo\t: %lu.%02lu\n"
				   "Cpu%dClkTck\t: %016lx\n",
				   i, cpu_data(i).udelay_val / (500000/HZ),
				   (cpu_data(i).udelay_val / (5000/HZ)) % 100,
				   i, cpu_data(i).clock_tick);
}
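
/* Note: udelay_val holds loops_per_jiffy, so the expressions above print
 * BogoMIPS = loops_per_jiffy * HZ / 500000 as an integer part plus two
 * decimal places.  For example (illustrative numbers only), with HZ == 100
 * and loops_per_jiffy == 400000 the integer part is 400000 / (500000/100)
 * = 80 and the fractional part is (400000 / (5000/100)) % 100 == 0,
 * i.e. "80.00" BogoMIPS.
 */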
void __init smp_store_cpu_info(int id)
{
	int cpu_node;

	/* multiplier and counter set by
	   smp_setup_percpu_timer()  */
	cpu_data(id).udelay_val = loops_per_jiffy;

	cpu_find_by_mid(id, &cpu_node);
	cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
						     "clock-frequency", 0);

	cpu_data(id).idle_volume = 1;

	cpu_data(id).dcache_size = prom_getintdefault(cpu_node, "dcache-size",
						      16 * 1024);
	cpu_data(id).dcache_line_size =
		prom_getintdefault(cpu_node, "dcache-line-size", 32);
	cpu_data(id).icache_size = prom_getintdefault(cpu_node, "icache-size",
						      16 * 1024);
	cpu_data(id).icache_line_size =
		prom_getintdefault(cpu_node, "icache-line-size", 32);
	cpu_data(id).ecache_size = prom_getintdefault(cpu_node, "ecache-size",
						      4 * 1024 * 1024);
	cpu_data(id).ecache_line_size =
		prom_getintdefault(cpu_node, "ecache-line-size", 64);

	printk("CPU[%d]: Caches "
	       "D[sz(%d):line_sz(%d)] "
	       "I[sz(%d):line_sz(%d)] "
	       "E[sz(%d):line_sz(%d)]\n",
	       id,
	       cpu_data(id).dcache_size, cpu_data(id).dcache_line_size,
	       cpu_data(id).icache_size, cpu_data(id).icache_line_size,
	       cpu_data(id).ecache_size, cpu_data(id).ecache_line_size);
}
static void smp_setup_percpu_timer(void);

static volatile unsigned long callin_flag = 0;
void __init smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	__local_per_cpu_offset = __per_cpu_offset(cpuid);

	if (tlb_type == hypervisor) {
		sun4v_register_fault_status();
		sun4v_ktsb_register();
	}

	smp_setup_percpu_timer();

	if (cheetah_pcache_forced_on)
		cheetah_enable_pcache();

	local_irq_enable();

	calibrate_delay();
	smp_store_cpu_info(cpuid);
	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush  %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	current_thread_info()->new_child = 0;

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	while (!cpu_isset(cpuid, smp_commenced_mask))
		rmb();

	cpu_set(cpuid, cpu_online_map);

	/* idle thread is expected to have preempt disabled */
	preempt_disable();
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}
static unsigned long current_tick_offset __read_mostly;

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave.  -DaveM
 */
#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0
static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		go[MASTER] = 1;
		membar_storeload();
		while (!(tm = go[SLAVE]))
			rmb();
		go[SLAVE] = 0;
		wmb();
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}
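
/* Note: tcenter is the midpoint of the best (shortest) round trip observed
 * above, so the returned value estimates (slave mid-point time - master
 * timestamp).  For example (illustrative numbers only), best_t0 = 100,
 * best_t1 = 140, best_tm = 115 gives tcenter = 120 and a delta of +5 ticks
 * that the caller then corrects for.
 */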
void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	go[MASTER] = 1;

	while (go[MASTER])
		rmb();

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS; i++) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0) {
				done = 1;	/* let's lock on to this... */
				bound = rt;
			}

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				tick_ops->add_tick(adj, current_tick_offset);
			}
#if DEBUG_TICK_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU (last diff %ld cycles,"
	       "maxerr %lu cycles)\n", smp_processor_id(), delta, rt);
}
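
/* Note: the per-round correction applied above is a simple damped feedback
 * step, adj = -delta plus a quarter of the accumulated latency estimate, so
 * repeated small measurement errors are smoothed out rather than applied at
 * full strength each round.  (Explanatory note on the loop above.)
 */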
static void smp_start_sync_tick_client(int cpu);
static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		rmb();

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar_storeload();

	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			while (!go[MASTER])
				rmb();
			go[MASTER] = 0;
			wmb();
			go[SLAVE] = tick_ops->get_tick();
			membar_storeload();
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);
}
extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;
static int __devinit smp_boot_one_cpu(unsigned int cpu)
{
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	struct task_struct *p;
	int timeout, ret, cpu_node;

	p = fork_idle(cpu);
	callin_flag = 0;
	cpu_new_thread = task_thread_info(p);
	cpu_set(cpu, cpu_callout_map);

	cpu_find_by_mid(cpu, &cpu_node);
	prom_startcpu(cpu_node, entry, cookie);

	for (timeout = 0; timeout < 5000000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}

	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		cpu_clear(cpu, cpu_callout_map);
		ret = -ENODEV;
	}
	cpu_new_thread = NULL;

	return ret;
}
static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
			((cpu & 0x40) >> 4) |
			(cpu & 0x3));
	}

	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr	%1, %2, %%pstate\n\t"
	"stxa	%4, [%0] %3\n\t"
	"stxa	%5, [%0+%8] %3\n\t"
	"add	%0, %8, %0\n\t"
	"stxa	%6, [%0+%8] %3\n\t"
	"membar	#Sync\n\t"
	"stxa	%%g0, [%7] %3\n\t"
	"membar	#Sync\n\t"
	"mov	0x20, %%g1\n\t"
	"ldxa	[%%g1] 0x7f, %%g0\n\t"
	"membar	#Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
	: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
			: "=r" (result)
			: "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);

	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016lx]\n",
		       smp_processor_id(), result);
	} else {
		udelay(2);
		goto again;
	}
}
static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	for_each_cpu_mask(i, mask)
		spitfire_xcall_helper(data0, data1, data2, pstate, i);
}
/* Cheetah now allows to send the whole 64-bytes of data in the interrupt
 * packet, but we have no use for that.  However we do take advantage of
 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
 */
static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	u64 pstate, ver;
	int nack_busy_id, is_jbus;

	if (cpus_empty(mask))
		return;

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
		   (ver >> 32) == __SERRANO_ID);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa	%0, [%3] %6\n\t"
			     "stxa	%1, [%4] %6\n\t"
			     "stxa	%2, [%5] %6\n\t"
			     "membar	#Sync\n\t"
			     : /* no outputs */
			     : "r" (data0), "r" (data1), "r" (data2),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	{
		int i;

		for_each_cpu_mask(i, mask) {
			u64 target = (i << 14) | 0x70;

			if (!is_jbus)
				target |= (nack_busy_id << 24);
			__asm__ __volatile__(
				"stxa	%%g0, [%0] %1\n\t"
				"membar	#Sync\n\t"
				: /* no outputs */
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
		}
	}

	/* Now, poll for completion. */
	{
		u64 dispatch_stat;
		long stuck;

		stuck = 100000 * nack_busy_id;
		do {
			__asm__ __volatile__("ldxa	[%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (dispatch_stat == 0UL) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & 0x5555555555555555UL);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));

		if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016lx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for_each_cpu_mask(i, mask) {
				u64 check_mask;

				if (is_jbus)
					check_mask = (0x2UL << (2*i));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_clear(i, mask);
				this_busy_nack += 2;
			}

			goto retry;
		}
	}
}
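
/* Note on the dispatch status layout used above: each cpu dispatched in a
 * batch owns a two-bit field in the interrupt dispatch status register,
 * with the busy bit in the even position and the nack bit in the odd
 * position.  That is why 0x5555555555555555UL masks the busy bits while
 * the check_mask tests the odd (nack) bit for each target.
 * (Explanatory note only.)
 */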
#if 0
/* Multi-cpu list version.  */
static int init_cpu_list(u16 *list, cpumask_t mask)
{
	int i, cnt;

	cnt = 0;
	for_each_cpu_mask(i, mask)
		list[cnt++] = i;

	return cnt;
}

static int update_cpu_list(u16 *list, int orig_cnt, cpumask_t mask)
{
	int i;

	for (i = 0; i < orig_cnt; i++) {
		if (list[i] == 0xffff)
			cpu_clear(i, mask);
	}

	return init_cpu_list(list, mask);
}
static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	int this_cpu = get_cpu();
	struct trap_per_cpu *tb = &trap_block[this_cpu];
	u64 *mondo = __va(tb->cpu_mondo_block_pa);
	u16 *cpu_list = __va(tb->cpu_list_pa);
	int cnt, retries;

	mondo[0] = data0;
	mondo[1] = data1;
	mondo[2] = data2;
	wmb();

	cpu_clear(this_cpu, mask);

	retries = 0;
	cnt = init_cpu_list(cpu_list, mask);
	do {
		register unsigned long func __asm__("%o5");
		register unsigned long arg0 __asm__("%o0");
		register unsigned long arg1 __asm__("%o1");
		register unsigned long arg2 __asm__("%o2");

		func = HV_FAST_CPU_MONDO_SEND;
		arg0 = cnt;
		arg1 = tb->cpu_list_pa;
		arg2 = tb->cpu_mondo_block_pa;

		__asm__ __volatile__("ta	%8"
				     : "=&r" (func), "=&r" (arg0),
				       "=&r" (arg1), "=&r" (arg2)
				     : "0" (func), "1" (arg0),
				       "2" (arg1), "3" (arg2),
				       "i" (HV_FAST_TRAP)
				     : "memory");
		if (likely(arg0 == HV_EOK))
			break;

		if (unlikely(++retries > 100)) {
			printk("CPU[%d]: sun4v mondo error %lu\n",
			       this_cpu, arg0);
			break;
		}

		cnt = update_cpu_list(cpu_list, cnt, mask);

		udelay(2 * cnt);
	} while (1);

	put_cpu();
}
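
/* Note on the hypervisor call above: the cpu_mondo_send fast trap takes the
 * number of target cpus, the real address of the cpu list, and the real
 * address of the mondo data block.  HV_EOK means every listed cpu accepted
 * the mondo; otherwise the list is rescanned and only the cpus that did not
 * accept it are retried.  (Explanatory note based on the code above.)
 */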
#else
/* Single-cpu list version.  */
static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	int this_cpu = get_cpu();
	struct trap_per_cpu *tb = &trap_block[this_cpu];
	u64 *mondo = __va(tb->cpu_mondo_block_pa);
	u16 *cpu_list = __va(tb->cpu_list_pa);
	int i;

	mondo[0] = data0;
	mondo[1] = data1;
	mondo[2] = data2;
	wmb();

	cpu_clear(this_cpu, mask);

	for_each_cpu_mask(i, mask) {
		int retries = 0;

		do {
			register unsigned long func __asm__("%o5");
			register unsigned long arg0 __asm__("%o0");
			register unsigned long arg1 __asm__("%o1");
			register unsigned long arg2 __asm__("%o2");

			cpu_list[0] = i;
			func = HV_FAST_CPU_MONDO_SEND;
			arg0 = 1;
			arg1 = tb->cpu_list_pa;
			arg2 = tb->cpu_mondo_block_pa;

			__asm__ __volatile__("ta	%8"
					     : "=&r" (func), "=&r" (arg0),
					       "=&r" (arg1), "=&r" (arg2)
					     : "0" (func), "1" (arg0),
					       "2" (arg1), "3" (arg2),
					       "i" (HV_FAST_TRAP)
					     : "memory");
			if (likely(arg0 == HV_EOK))
				break;

			if (unlikely(++retries > 100)) {
				printk("CPU[%d]: sun4v mondo error %lu\n",
				       this_cpu, arg0);
				break;
			}

			udelay(2 * i);
		} while (1);
	}

	put_cpu();
}
#endif
/* Send cross call to all processors mentioned in MASK
 * except self.
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
{
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
	int this_cpu = get_cpu();

	cpus_and(mask, mask, cpu_online_map);
	cpu_clear(this_cpu, mask);

	if (tlb_type == spitfire)
		spitfire_xcall_deliver(data0, data1, data2, mask);
	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_xcall_deliver(data0, data1, data2, mask);
	else
		hypervisor_xcall_deliver(data0, data1, data2, mask);
	/* NOTE: Caller runs local copy on master. */

	put_cpu();
}

extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	smp_cross_call_masked(&xcall_sync_tick,
			      0, 0, 0, mask);
}

/* Send cross call to all processors except self. */
#define smp_cross_call(func, ctx, data1, data2) \
	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)
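
/* Note: data0 packs the cross-call arguments for the delivery routines
 * above: the MMU context value goes in the upper 32 bits and the low 32
 * bits hold the (truncated) address of the xcall trap handler such as
 * xcall_sync_tick.  (Explanatory note; see the data0 expression above.)
 */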
struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t finished;
	int wait;
};

static DEFINE_SPINLOCK(call_lock);
static struct call_data_struct *call_data;

extern unsigned long xcall_call_function;
/*
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
static int smp_call_function_mask(void (*func)(void *info), void *info,
				  int nonatomic, int wait, cpumask_t mask)
{
	struct call_data_struct data;
	int cpus = cpus_weight(mask) - 1;
	long timeout;

	if (!cpus)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.finished, 0);
	data.wait = wait;

	spin_lock(&call_lock);

	call_data = &data;

	smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);

	/*
	 * Wait for other cpus to complete function or at
	 * least snap the call data.
	 */
	timeout = 1000000;
	while (atomic_read(&data.finished) != cpus) {
		if (--timeout <= 0)
			goto out_timeout;
		barrier();
		udelay(1);
	}

	spin_unlock(&call_lock);

	return 0;

out_timeout:
	spin_unlock(&call_lock);
	printk("XCALL: Remote cpus not responding, ncpus=%ld finished=%ld\n",
	       (long) num_online_cpus() - 1L,
	       (long) atomic_read(&data.finished));
	return 0;
}
int smp_call_function(void (*func)(void *info), void *info,
		      int nonatomic, int wait)
{
	return smp_call_function_mask(func, info, nonatomic, wait,
				      cpu_online_map);
}
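
/* Note: with wait != 0 the initiator returns only after every other online
 * cpu has finished running func; with wait == 0 it returns as soon as the
 * other cpus have snapped the call data and are free to run func later.
 * (Explanatory note on the helpers above.)
 */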
void smp_call_function_client(int irq, struct pt_regs *regs)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;

	clear_softint(1 << irq);
	if (call_data->wait) {
		/* let initiator proceed only after completion */
		func(info);
		atomic_inc(&call_data->finished);
	} else {
		/* let initiator proceed after getting data */
		atomic_inc(&call_data->finished);
		func(info);
	}
}
static void tsb_sync(void *info)
{
	struct mm_struct *mm = info;

	if (current->active_mm == mm)
		tsb_context_switch(mm);
}

void smp_tsb_sync(struct mm_struct *mm)
{
	smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask);
}
extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_pending;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_report_regs;
extern unsigned long xcall_receive_signal;

#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

#ifdef CONFIG_DEBUG_DCFLUSH
extern atomic_t dcpage_flushes;
extern atomic_t dcpage_flushes_xcall;
#endif
static __inline__ void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#endif

	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
}
void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

	this_cpu = get_cpu();

	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0;

		if (tlb_type == spitfire) {
			data0 =
				((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping(page) != NULL)
				data0 |= ((u64)1 << 32);
			spitfire_xcall_deliver(data0,
					       __pa(pg_addr),
					       (u64) pg_addr,
					       mask);
		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
			data0 =
				((u64)&xcall_flush_dcache_page_cheetah);
			cheetah_xcall_deliver(data0,
					      __pa(pg_addr),
					      0, mask);
#endif
		}
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}

	put_cpu();
}
void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	void *pg_addr = page_address(page);
	cpumask_t mask = cpu_online_map;
	u64 data0;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	this_cpu = get_cpu();

	cpu_clear(this_cpu, mask);

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	if (cpus_empty(mask))
		goto flush_self;
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		if (page_mapping(page) != NULL)
			data0 |= ((u64)1 << 32);
		spitfire_xcall_deliver(data0,
				       __pa(pg_addr),
				       (u64) pg_addr,
				       mask);
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
		cheetah_xcall_deliver(data0,
				      __pa(pg_addr),
				      0, mask);
#endif
	}
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes_xcall);
#endif
 flush_self:
	__local_flush_dcache_page(page);

	put_cpu();
}
void smp_receive_signal(int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	if (cpu_online(cpu)) {
		u64 data0 = (((u64)&xcall_receive_signal) & 0xffffffff);

		if (tlb_type == spitfire)
			spitfire_xcall_deliver(data0, 0, 0, mask);
		else if (tlb_type == cheetah || tlb_type == cheetah_plus)
			cheetah_xcall_deliver(data0, 0, 0, mask);
		else if (tlb_type == hypervisor)
			hypervisor_xcall_deliver(data0, 0, 0, mask);
	}
}

void smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	/* Just return, rtrap takes care of the rest. */
	clear_softint(1 << irq);
}
void smp_report_regs(void)
{
	smp_cross_call(&xcall_report_regs, 0, 0, 0);
}
/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * The SMP TLB coherency scheme we use works as follows:
 *
 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
 *    space has (potentially) executed on, this is the heuristic
 *    we use to avoid doing cross calls.
 *
 *    Also, for flushing from kswapd and also for clones, we
 *    use cpu_vm_mask as the list of cpus to make run the TLB.
 *
 * 2) TLB context numbers are shared globally across all processors
 *    in the system, this allows us to play several games to avoid
 *    cross calls.
 *
 *    One invariant is that when a cpu switches to a process, and
 *    that process's tsk->active_mm->cpu_vm_mask does not have the
 *    current cpu's bit set, that tlb context is flushed locally.
 *
 *    If the address space is non-shared (ie. mm->count == 1) we avoid
 *    cross calls when we want to flush the currently running process's
 *    tlb state.  This is done by clearing all cpu bits except the current
 *    processor's in current->active_mm->cpu_vm_mask and performing the
 *    flush locally only.  This will force any subsequent cpus which run
 *    this task to flush the context from the local tlb if the process
 *    migrates to another cpu (again).
 *
 * 3) For shared address spaces (threads) and swapping we bite the
 *    bullet for most cases and perform the cross call (but only to
 *    the cpus listed in cpu_vm_mask).
 *
 * The performance gain from "optimizing" away the cross call for threads is
 * questionable (in theory the big win for threads is the massive sharing of
 * address space state across processors).
 */
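
/* In code terms, the common fast path below is: if atomic_read(&mm->mm_users)
 * == 1, shrink mm->cpu_vm_mask to just the local cpu and flush only the
 * local TLB; otherwise cross-call the cpus in mm->cpu_vm_mask and then flush
 * locally as well.  (Explanatory summary of smp_flush_tlb_mm() and
 * smp_flush_tlb_pending() below.)
 */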
/* This currently is only used by the hugetlb arch pre-fault
 * hook on UltraSPARC-III+ and later when changing the pagesize
 * bits of the context register for an address space.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (atomic_read(&mm->mm_users) == 1) {
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
		goto local_flush_and_out;
	}

	smp_cross_call_masked(&xcall_flush_tlb_mm,
			      ctx, 0, 0,
			      mm->cpu_vm_mask);

local_flush_and_out:
	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

	put_cpu();
}
void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
	else
		smp_cross_call_masked(&xcall_flush_tlb_pending,
				      ctx, nr, (unsigned long) vaddrs,
				      mm->cpu_vm_mask);

	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}
void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	end    = PAGE_ALIGN(end);
	if (start != end) {
		smp_cross_call(&xcall_flush_tlb_kernel_range,
			       0, start, end);

		__flush_tlb_kernel_range(start, end);
	}
}
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;
void smp_capture(void)
{
	int result = atomic_add_ret(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		membar_storestore_loadstore();
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			rmb();
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}
void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar_storeload_storestore();
		atomic_dec(&smp_capture_registry);
	}
}
/* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
 * can service tlb flush xcalls...
 */
extern void prom_world(int);

void smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar_storeload_storestore();
	while (penguins_are_doing_time)
		rmb();
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}
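
/* Note on the capture protocol: smp_capture() sets penguins_are_doing_time,
 * cross-calls xcall_capture, and waits until every online cpu has bumped
 * smp_capture_registry from inside smp_penguin_jailcell(); the captured cpus
 * then spin there until smp_release() clears the flag.  Interrupts stay
 * enabled in the jail cell so TLB flush xcalls can still be serviced, as the
 * comment above explains.
 */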
#define prof_multiplier(__cpu)		cpu_data(__cpu).multiplier
#define prof_counter(__cpu)		cpu_data(__cpu).counter
void smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	unsigned long compare, tick, pstate;
	int cpu = smp_processor_id();
	int user = user_mode(regs);

	/*
	 * Check for level 14 softint.
	 */
	{
		unsigned long tick_mask = tick_ops->softint_mask;

		if (!(get_softint() & tick_mask)) {
			extern void handler_irq(int, struct pt_regs *);

			handler_irq(14, regs);
			return;
		}
		clear_softint(tick_mask);
	}

	do {
		profile_tick(CPU_PROFILING, regs);
		if (!--prof_counter(cpu)) {
			irq_enter();

			if (cpu == boot_cpu_id) {
				kstat_this_cpu.irqs[0]++;
				timer_tick_interrupt(regs);
			}

			update_process_times(user);

			irq_exit();

			prof_counter(cpu) = prof_multiplier(cpu);
		}

		/* Guarantee that the following sequences execute
		 * uninterrupted.
		 */
		__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
				     "wrpr	%0, %1, %%pstate"
				     : "=r" (pstate)
				     : "i" (PSTATE_IE));

		compare = tick_ops->add_compare(current_tick_offset);
		tick = tick_ops->get_tick();

		/* Restore PSTATE_IE. */
		__asm__ __volatile__("wrpr	%0, 0x0, %%pstate"
				     : /* no outputs */
				     : "r" (pstate));
	} while (time_after_eq(tick, compare));
}
static void __init smp_setup_percpu_timer(void)
{
	int cpu = smp_processor_id();
	unsigned long pstate;

	prof_counter(cpu) = prof_multiplier(cpu) = 1;

	/* Guarantee that the following sequences execute
	 * uninterrupted.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));

	tick_ops->init_tick(current_tick_offset);

	/* Restore PSTATE_IE. */
	__asm__ __volatile__("wrpr	%0, 0x0, %%pstate"
			     : /* no outputs */
			     : "r" (pstate));
}
void __init smp_tick_init(void)
{
	boot_cpu_id = hard_smp_processor_id();
	current_tick_offset = timer_tick_offset;

	cpu_set(boot_cpu_id, cpu_online_map);
	prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
}
/* /proc/profile writes can call this, don't __init it please. */
static DEFINE_SPINLOCK(prof_setup_lock);

int setup_profiling_timer(unsigned int multiplier)
{
	unsigned long flags;
	int i;

	if ((!multiplier) || (timer_tick_offset / multiplier) < 1000)
		return -EINVAL;

	spin_lock_irqsave(&prof_setup_lock, flags);
	for (i = 0; i < NR_CPUS; i++)
		prof_multiplier(i) = multiplier;
	current_tick_offset = (timer_tick_offset / multiplier);
	spin_unlock_irqrestore(&prof_setup_lock, flags);

	return 0;
}
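
/* Note: raising the profiling multiplier shortens the per-cpu tick period
 * proportionally.  For example (illustrative numbers only), with a
 * timer_tick_offset of 1000000 cycles a multiplier of 4 makes
 * current_tick_offset 250000, so the percpu timer interrupt fires four times
 * per original tick while timer_tick_interrupt() still runs only on every
 * prof_multiplier-th firing via the prof_counter countdown above.
 */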
/* Constrain the number of cpus to max_cpus.  */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	if (num_possible_cpus() > max_cpus) {
		int instance, mid;

		instance = 0;
		while (!cpu_find_by_instance(instance, NULL, &mid)) {
			if (mid != boot_cpu_id) {
				cpu_clear(mid, phys_cpu_present_map);
				if (num_possible_cpus() <= max_cpus)
					break;
			}
			instance++;
		}
	}

	smp_store_cpu_info(boot_cpu_id);
}
/* Set this up early so that things like the scheduler can init
 * properly.  We use the same cpu mask for both the present and
 * possible cpu map.
 */
void __init smp_setup_cpu_possible_map(void)
{
	int instance, mid;

	instance = 0;
	while (!cpu_find_by_instance(instance, NULL, &mid)) {
		if (mid < NR_CPUS)
			cpu_set(mid, phys_cpu_present_map);
		instance++;
	}
}
void __devinit smp_prepare_boot_cpu(void)
{
	int cpu = hard_smp_processor_id();

	if (cpu >= NR_CPUS) {
		prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
		prom_halt();
	}

	current_thread_info()->cpu = cpu;
	__local_per_cpu_offset = __per_cpu_offset(cpu);

	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), phys_cpu_present_map);
}
int __devinit __cpu_up(unsigned int cpu)
{
	int ret = smp_boot_one_cpu(cpu);

	if (!ret) {
		cpu_set(cpu, smp_commenced_mask);
		while (!cpu_isset(cpu, cpu_online_map))
			mb();
		if (!cpu_isset(cpu, cpu_online_map)) {
			ret = -ENODEV;
		} else {
			smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}
void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_online(i))
			bogosum += cpu_data(i).udelay_val;
	}
	printk("Total of %ld processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       (long) num_online_cpus(),
	       bogosum/(500000/HZ),
	       (bogosum/(5000/HZ))%100);
}
void smp_send_reschedule(int cpu)
{
	smp_receive_signal(cpu);
}

/* This is a nop because we capture all other cpus
 * anyways when making the PROM active.
 */
void smp_send_stop(void)
{
}
unsigned long __per_cpu_base __read_mostly;
unsigned long __per_cpu_shift __read_mostly;

EXPORT_SYMBOL(__per_cpu_base);
EXPORT_SYMBOL(__per_cpu_shift);
void __init setup_per_cpu_areas(void)
{
	unsigned long goal, size, i;
	char *ptr;

	/* Copy section for each CPU (we discard the original) */
	goal = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
#ifdef CONFIG_MODULES
	if (goal < PERCPU_ENOUGH_ROOM)
		goal = PERCPU_ENOUGH_ROOM;
#endif
	__per_cpu_shift = 0;
	for (size = 1UL; size < goal; size <<= 1UL)
		__per_cpu_shift++;

	ptr = alloc_bootmem(size * NR_CPUS);

	__per_cpu_base = ptr - __per_cpu_start;

	for (i = 0; i < NR_CPUS; i++, ptr += size)
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
}
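
/* Note on the layout built above: size ends up as the smallest power of two
 * >= goal and __per_cpu_shift as its log2, with the NR_CPUS copies placed
 * back to back starting at ptr.  A given cpu's copy of a per-cpu variable
 * therefore lives at the variable's link-time address plus
 * __per_cpu_base + (cpu << __per_cpu_shift), which is what the
 * __per_cpu_offset() macro used in smp_callin() computes.
 * (Explanatory note.)
 */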