/*
 * Count register synchronisation.
 *
 * All CPUs will have their count registers synchronised to the CPU0
 * expirelo value.  This can cause a small timewarp for CPU0.  All other
 * CPUs should not have done anything significant (but they may have had
 * interrupts enabled briefly - prom_smp_finish() should not be
 * responsible for enabling interrupts...)
 *
 * FIXME: broken for SMTC
 */
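
/*
 * Call sites (a sketch, not defined in this file): the boot CPU is
 * expected to run synchronise_count_master() once all secondaries are
 * online, and each secondary runs synchronise_count_slave() during its
 * own bringup - in this era of the tree the hooks live in
 * arch/mips/kernel/smp.c (e.g. smp_cpus_done() and start_secondary()).
 */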

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irqflags.h>

#include <asm/atomic.h>
#include <asm/barrier.h>
#include <asm/cpumask.h>
#include <asm/mipsregs.h>
#include <asm/r4k-timer.h>

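/*
 * count_start_flag is the master's "go" signal to the slaves;
 * count_count_start and count_count_stop are the entry and exit
 * rendezvous counters used by each synchronisation pass below.
 */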
static atomic_t __initdata count_start_flag = ATOMIC_INIT(0);
static atomic_t __initdata count_count_start = ATOMIC_INIT(0);
static atomic_t __initdata count_count_stop = ATOMIC_INIT(0);

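/*
 * COUNTON is how many Count ticks into the future the first Compare
 * interrupt is scheduled once the registers have been synchronised;
 * NR_LOOPS is the number of warm-up/synchronisation passes (only the
 * last pass actually writes c0_count).
 */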
#define COUNTON 100
#define NR_LOOPS 5

void __init synchronise_count_master(void)
{
	int i;
	unsigned long flags;
	unsigned int initcount;
	int nslaves;

#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC needs to synchronise per VPE, not per CPU
	 * ignore for now
	 */
	return;
#endif

	pr_info("Checking COUNT synchronization across %u CPUs: ",
		num_online_cpus());

	local_irq_save(flags);

	/*
	 * Notify the slaves that it's time to start
	 */
	atomic_set(&count_start_flag, 1);
	smp_wmb();
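	/*
	 * The smp_wmb() above pairs with the mb()-spaced polling of
	 * count_start_flag in synchronise_count_slave(), so a slave
	 * that observes the flag also observes the master's earlier
	 * stores.
	 */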

	/* Count will be initialised to expirelo for all CPUs */
	initcount = expirelo;

	/*
	 * We loop a few times to get a primed instruction cache, then
	 * the last pass is more or less synchronised and the master and
	 * slaves each set their cycle counters to a known value all at
	 * once.  This reduces the chance of having random offsets
	 * between the processors, and guarantees that the maximum delay
	 * between the cycle counters is never bigger than the latency of
	 * information-passing (cachelines) between two CPUs.
	 */
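
	/*
	 * Per-pass handshake, as implemented below (sketch):
	 *
	 *   master                          each slave
	 *   ------                          ----------
	 *   wait count_count_start==n-1     atomic_inc(count_count_start)
	 *   reset count_count_stop          wait count_count_start==n
	 *   atomic_inc(count_count_start)
	 *   [last pass: write c0_count]     [last pass: write c0_count]
	 *   wait count_count_stop==n-1      atomic_inc(count_count_stop)
	 *   reset count_count_start         wait count_count_stop==n
	 *   atomic_inc(count_count_stop)
	 */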

	nslaves = num_online_cpus() - 1;
	for (i = 0; i < NR_LOOPS; i++) {
		/* slaves loop on '!= ncpus' */
		while (atomic_read(&count_count_start) != nslaves)
			mb();
		atomic_set(&count_count_stop, 0);
		smp_wmb();

		/* this lets the slaves write their count register */
		atomic_inc(&count_count_start);

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS-1)
			write_c0_count(initcount);

		/*
		 * Wait for all slaves to leave the synchronization point:
		 */
		while (atomic_read(&count_count_stop) != nslaves)
			mb();
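		/*
		 * Reset the start rendezvous for the next pass, then
		 * release the slaves from the stop rendezvous.
		 */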
		atomic_set(&count_count_start, 0);
		smp_wmb();
		atomic_inc(&count_count_stop);
	}
	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);

	local_irq_restore(flags);

	/*
	 * i386 code reported the skew here, but the
	 * count registers were almost certainly out of sync
	 * so no point in alarming people
	 */
	printk("done.\n");
}

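/*
 * Slave side of the rendezvous above; intended to run on each secondary
 * CPU as it comes up, after the master has signalled count_start_flag.
 */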
void __init synchronise_count_slave(void)
{
	int i;
	unsigned long flags;
	unsigned int initcount;
	int ncpus;

#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC needs to synchronise per VPE, not per CPU
	 * ignore for now
	 */
	return;
#endif

	local_irq_save(flags);

	/*
	 * Not every CPU is online at the time this gets called,
	 * so we first wait for the master to say everyone is ready
	 */

	while (!atomic_read(&count_start_flag))
		mb();

	/* Count will be initialised to expirelo for all CPUs */
	initcount = expirelo;

	ncpus = num_online_cpus();
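	/*
	 * Unlike the master, which waits for nslaves (ncpus - 1), the
	 * slaves wait for the counters to reach ncpus: the final
	 * increment on each rendezvous comes from the master and acts
	 * as the release.
	 */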
	for (i = 0; i < NR_LOOPS; i++) {
		atomic_inc(&count_count_start);
		while (atomic_read(&count_count_start) != ncpus)
			mb();

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS-1)
			write_c0_count(initcount);

		atomic_inc(&count_count_stop);
		while (atomic_read(&count_count_stop) != ncpus)
			mb();
	}
	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);

	local_irq_restore(flags);
}
#undef NR_LOOPS