/*
 * check TSC synchronization.
 *
 * Copyright (C) 2006, Red Hat, Inc., Ingo Molnar
 *
 * We check whether all boot CPUs have their TSC's synchronized,
 * print a warning if not and turn off the TSC clock-source.
 *
 * The warp-check is point-to-point between two CPUs, the CPU
 * initiating the bootup is the 'source CPU', the freshly booting
 * CPU is the 'target CPU'.
 *
 * Only two CPUs may participate - they can enter in any order.
 * ( The serial nature of the boot logic and the CPU hotplug lock
 *   protects against more than 2 CPUs entering this code. )
 */
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <asm/tsc.h>

/*
 * Entry/exit counters that make sure that both CPUs
 * run the measurement code at once:
 */
static __cpuinitdata atomic_t start_count;
static __cpuinitdata atomic_t stop_count;

/*
 * We use a raw spinlock in this exceptional case, because
 * we want to have the fastest, inlined, non-debug version
 * of a critical section, to be able to prove TSC time-warps:
 */
static __cpuinitdata arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static __cpuinitdata cycles_t last_tsc;
static __cpuinitdata cycles_t max_warp;
static __cpuinitdata int nr_warps;
/*
 * TSC-warp measurement loop running on both CPUs:
 */
static __cpuinit void check_tsc_warp(unsigned int timeout)
{
	cycles_t start, now, prev, end;
	int i;

	rdtsc_barrier();
	start = get_cycles();
	rdtsc_barrier();
	/*
	 * The measurement runs for 'timeout' msecs:
	 */
	end = start + (cycles_t) tsc_khz * timeout;
	now = start;

	for (i = 0; ; i++) {
		/*
		 * We take the global lock, measure TSC, save the
		 * previous TSC that was measured (possibly on
		 * another CPU) and update the previous TSC timestamp.
		 */
		arch_spin_lock(&sync_lock);
		prev = last_tsc;
		rdtsc_barrier();
		now = get_cycles();
		rdtsc_barrier();
		last_tsc = now;
		arch_spin_unlock(&sync_lock);

		/*
		 * Be nice every now and then (and also check whether
		 * the measurement is done [we also insert a 10 million
		 * loops safety exit, so we don't lock up in case the
		 * TSC readout is totally broken]):
		 */
		if (unlikely(!(i & 7))) {
			if (now > end || i > 10000000)
				break;
			cpu_relax();
			touch_nmi_watchdog();
		}
		/*
		 * Outside the critical section we can now see whether
		 * we saw a time-warp of the TSC going backwards:
		 */
		if (unlikely(prev > now)) {
			arch_spin_lock(&sync_lock);
			max_warp = max(max_warp, prev - now);
			nr_warps++;
			arch_spin_unlock(&sync_lock);
		}
	}
	WARN(!(now-start),
		"Warning: zero tsc calibration delta: %Ld [max: %Ld]\n",
			now-start, end-start);
}
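
/*
 * Example of how a warp shows up (hypothetical numbers): the source CPU
 * takes sync_lock, reads its TSC as 1000050 and stores it in last_tsc;
 * the target CPU then takes the lock, loads prev = 1000050, reads its
 * own TSC as 1000020 and, outside the lock, finds prev > now - so a
 * 30-cycle warp is accounted in max_warp/nr_warps. With synchronized
 * TSCs every reading taken under the lock is monotonic across the two
 * CPUs and no warp is ever counted.
 */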

/*
 * If the target CPU coming online doesn't have any of its core-siblings
 * online, a timeout of 20msec will be used for the TSC-warp measurement
 * loop. Otherwise a smaller timeout of 2msec will be used, as we have some
 * information about this socket already (and this information grows as we
 * have more and more logical-siblings in that socket).
 *
 * Ideally we should be able to skip the TSC sync check on the other
 * core-siblings, if the first logical CPU in a socket passed the sync test.
 * But as the TSC is per-logical CPU and can potentially be modified wrongly
 * by the BIOS, a TSC sync test of shorter duration should still be able
 * to catch such errors. Also, this will catch the condition where all the
 * cores in the socket don't get reset at the same time.
 */
static inline unsigned int loop_timeout(int cpu)
{
	return (cpumask_weight(cpu_core_mask(cpu)) > 1) ? 2 : 20;
}
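
/*
 * For scale (assuming a hypothetical 2 GHz part, i.e. tsc_khz == 2000000):
 * the 20 msec timeout used for the first CPU of a socket translates to
 * roughly 40 million TSC cycles in check_tsc_warp(), and the 2 msec
 * timeout used once siblings are already online to roughly 4 million.
 */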

/*
 * Source CPU calls into this - it waits for the freshly booted
 * target CPU to arrive and then starts the measurement:
 */
void __cpuinit check_tsc_sync_source(int cpu)
{
	int cpus = 2;

	/*
	 * No need to check if we already know that the TSC is not
	 * synchronized:
	 */
	if (unsynchronized_tsc())
		return;

	if (tsc_clocksource_reliable) {
		if (cpu == (nr_cpu_ids-1) || system_state != SYSTEM_BOOTING)
			pr_info(
			"Skipped synchronization checks as TSC is reliable.\n");
		return;
	}

	/*
	 * Reset it - in case this is a second bootup:
	 */
	atomic_set(&stop_count, 0);

	/*
	 * Wait for the target to arrive:
	 */
	while (atomic_read(&start_count) != cpus-1)
		cpu_relax();
	/*
	 * Trigger the target to continue into the measurement too:
	 */
	atomic_inc(&start_count);

	check_tsc_warp(loop_timeout(cpu));

	while (atomic_read(&stop_count) != cpus-1)
		cpu_relax();

	if (nr_warps) {
		pr_warning("TSC synchronization [CPU#%d -> CPU#%d]:\n",
			smp_processor_id(), cpu);
		pr_warning("Measured %Ld cycles TSC warp between CPUs, "
			   "turning off TSC clock.\n", max_warp);
		mark_tsc_unstable("check_tsc_sync_source failed");
	} else {
		pr_debug("TSC synchronization [CPU#%d -> CPU#%d]: passed\n",
			smp_processor_id(), cpu);
	}

	/*
	 * Reset it - just in case we boot another CPU later:
	 */
	atomic_set(&start_count, 0);
	nr_warps = 0;
	max_warp = 0;
	last_tsc = 0;

	/*
	 * Let the target continue with the bootup:
	 */
	atomic_inc(&stop_count);
}

/*
 * Freshly booted CPUs call into this:
 */
void __cpuinit check_tsc_sync_target(void)
{
	int cpus = 2;

	if (unsynchronized_tsc() || tsc_clocksource_reliable)
		return;

	/*
	 * Register this CPU's participation and wait for the
	 * source CPU to start the measurement:
	 */
	atomic_inc(&start_count);
	while (atomic_read(&start_count) != cpus)
		cpu_relax();

	check_tsc_warp(loop_timeout(smp_processor_id()));

	/*
	 * Ok, we are done:
	 */
	atomic_inc(&stop_count);

	/*
	 * Wait for the source CPU to print stuff:
	 */
	while (atomic_read(&stop_count) != cpus)
		cpu_relax();
}
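
/*
 * The sketch below is NOT part of the kernel file above. It is a minimal,
 * stand-alone user-space illustration of the same warp-detection loop,
 * assuming an x86 machine, GCC and pthreads; warp_check(), LOOPS and the
 * pthread spinlock are stand-ins for the kernel's check_tsc_warp(),
 * timeout handling and arch_spinlock_t, and __rdtsc()/_mm_lfence() stand
 * in for get_cycles()/rdtsc_barrier(). A faithful cross-CPU test would
 * also pin each thread to a different CPU (e.g. pthread_setaffinity_np()),
 * which is omitted here for brevity. Build with:
 *   gcc -O2 -pthread tsc_warp_demo.c -o tsc_warp_demo
 */
#include <stdio.h>
#include <stdint.h>
#include <pthread.h>
#include <x86intrin.h>

#define LOOPS 1000000

static pthread_spinlock_t sync_lock;
static uint64_t last_tsc;
static uint64_t max_warp;
static long nr_warps;

static void *warp_check(void *arg)
{
	for (long i = 0; i < LOOPS; i++) {
		/* serialized TSC read, mirroring the kernel's critical section */
		pthread_spin_lock(&sync_lock);
		uint64_t prev = last_tsc;
		_mm_lfence();
		uint64_t now = __rdtsc();
		_mm_lfence();
		last_tsc = now;
		pthread_spin_unlock(&sync_lock);

		/* outside the lock: did time appear to go backwards? */
		if (prev > now) {
			pthread_spin_lock(&sync_lock);
			if (prev - now > max_warp)
				max_warp = prev - now;
			nr_warps++;
			pthread_spin_unlock(&sync_lock);
		}
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_spin_init(&sync_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_create(&a, NULL, warp_check, NULL);
	pthread_create(&b, NULL, warp_check, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	printf("warps: %ld, max: %llu cycles\n",
	       nr_warps, (unsigned long long)max_warp);
	return 0;
}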