/*
 * Intel SMP support routines.
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 * (c) 2002,2003 Andi Kleen, SuSE Labs.
 *
 * i386 and x86_64 integration by Glauber Costa <gcosta@redhat.com>
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */

#include <linux/init.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>

#include <asm/mtrr.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <mach_ipi.h>
#include <mach_apic.h>
/*
 * Some notes on x86 processor bugs affecting SMP operation:
 *
 * Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
 * The Linux implications for SMP are handled as follows:
 *
 * Pentium III / [Xeon]
 *	None of the E1AP-E3AP errata are visible to the user.
 *
 *	E1AP.	see PII A1AP
 *	E2AP.	see PII A2AP
 *	E3AP.	see PII A3AP
 *
 * Pentium II / [Xeon]
 *	None of the A1AP-A3AP errata are visible to the user.
 *
 *	A1AP.	see PPro 1AP
 *	A2AP.	see PPro 2AP
 *	A3AP.	see PPro 7AP
 *
 * Pentium Pro
 *	None of the 1AP-9AP errata are visible to the normal user,
 *	except for the occasional delivery of a 'spurious interrupt' as
 *	trap #15. This is very rare and a non-problem.
 *
 *	1AP.	Linux maps the APIC as non-cacheable.
 *	2AP.	Worked around in hardware.
 *	3AP.	Fixed in C0 and above steppings by a microcode update.
 *		Linux does not use excessive STARTUP_IPIs.
 *	4AP.	Worked around in hardware.
 *	5AP.	Symmetric IO mode (normal Linux operation) is not affected.
 *		'noapic' mode has vector 0xf filled out properly.
 *	6AP.	'noapic' mode might be affected - fixed in later steppings.
 *	7AP.	We do not assume writes to the LVT deasserting IRQs.
 *	8AP.	We do not enable low power mode (deep sleep) during MP bootup.
 *	9AP.	We do not use mixed mode.
 *
 * Pentium
 *	There is a marginal case where REP MOVS on 100MHz SMP
 *	machines with B stepping processors can fail. XXX should provide
 *	an L1cache=Writethrough or L1cache=off option.
 *
 *	B stepping CPUs may hang. There are hardware workarounds
 *	for this. We warn about it in case your board doesn't have the
 *	workarounds. Basically that's so I can tell anyone with a B stepping
 *	CPU and SMP problems "tough".
 *
 *	Specific items [from the Pentium Processor Specification Update]
 *
 *	1AP.	Linux doesn't use remote read.
 *	2AP.	Linux doesn't trust APIC errors.
 *	3AP.	We work around this.
 *	4AP.	Linux never generates 3 interrupts of the same priority
 *		to cause a lost local interrupt.
 *	5AP.	Remote read is never used.
 *	6AP.	Not affected - worked around in hardware.
 *	7AP.	Not affected - worked around in hardware.
 *	8AP.	Worked around in hardware - we get explicit CS errors if not.
 *	9AP.	Only 'noapic' mode is affected. It might generate spurious
 *		interrupts; we log only the first one and count the
 *		rest silently.
 *	10AP.	Not affected - worked around in hardware.
 *	11AP.	Linux reads the APIC between writes to avoid this, as per
 *		the documentation. Make sure you preserve this, as it affects
 *		the C stepping chips too.
 *	12AP.	Not affected - worked around in hardware.
 *	13AP.	Not affected - worked around in hardware.
 *	14AP.	We always deassert INIT during bootup.
 *	15AP.	Not affected - worked around in hardware.
 *	16AP.	Not affected - worked around in hardware.
 *	17AP.	Not affected - worked around in hardware.
 *	18AP.	Not affected - worked around in hardware.
 *	19AP.	Not affected - worked around in BIOS.
 *
 * If this sounds worrying, believe me: these bugs are either ___RARE___
 * or are signal timing bugs worked around in hardware, and there is
 * nothing of note from the C stepping upwards.
 */

/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
static void native_smp_send_reschedule(int cpu)
{
	if (unlikely(cpu_is_offline(cpu))) {
		WARN_ON(1);
		return;
	}
	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}
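
/*
 * Editorial usage sketch (not from the original file): callers reach
 * this via the smp_ops table at the bottom of this file, normally
 * through the smp_send_reschedule() wrapper. 'target' below is a
 * hypothetical variable:
 *
 *	int target = 1;
 *	if (cpu_online(target))
 *		smp_send_reschedule(target);
 *
 * The receiving CPU does nothing in the handler itself; rescheduling
 * happens on the return-from-interrupt path (see
 * smp_reschedule_interrupt() below).
 */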

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
};
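
/*
 * Editorial note on the protocol implied by 'started'/'finished' (a
 * summary, assuming the usual call-function handshake): the initiator
 * publishes a call_data_struct and sends an IPI; each receiver
 * increments 'started' once it has copied func/info, and increments
 * 'finished' after running func when the initiator asked to wait. The
 * initiator spins until the counters reach the number of targeted
 * CPUs, which keeps its on-stack structure alive for as long as any
 * receiver may touch it.
 */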

void lock_ipi_call_lock(void)
{
	spin_lock_irq(&call_lock);
}

void unlock_ipi_call_lock(void)
{
	spin_unlock_irq(&call_lock);
}

static struct call_data_struct *call_data;

static void __smp_call_function(void (*func) (void *info), void *info,
				int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus = num_online_cpus() - 1;

	if (!cpus)
		return;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	mb();

	/* Send a message to all other CPUs and wait for them to respond */
	send_IPI_allbutself(CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
}

/**
 * smp_call_function_mask(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on. Must not include the current cpu.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
static int
native_smp_call_function_mask(cpumask_t mask,
			      void (*func)(void *), void *info,
			      int wait)
{
	struct call_data_struct data;
	cpumask_t allbutself;
	int cpus;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	/* Holding any lock stops cpus from going down. */
	spin_lock(&call_lock);

	allbutself = cpu_online_map;
	cpu_clear(smp_processor_id(), allbutself);

	cpus_and(mask, mask, allbutself);
	cpus = cpus_weight(mask);

	if (!cpus) {
		spin_unlock(&call_lock);
		return 0;
	}

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	wmb();

	/* Send a message to other CPUs */
	if (cpus_equal(mask, allbutself))
		send_IPI_allbutself(CALL_FUNCTION_VECTOR);
	else
		send_IPI_mask(mask, CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
	spin_unlock(&call_lock);

	return 0;
}
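
/*
 * Editorial caller sketch (hypothetical names, not from this file).
 * Real callers use the smp_call_function_mask() wrapper over smp_ops:
 *
 *	static void drain_local_state(void *unused)
 *	{
 *		// runs on each targeted CPU, in interrupt context
 *	}
 *
 *	cpumask_t mask = cpu_online_map;
 *	cpu_clear(smp_processor_id(), mask);
 *	smp_call_function_mask(mask, drain_local_state, NULL, 1);
 *
 * Because @func runs from the IPI handler between irq_enter() and
 * irq_exit(), it must be fast and must not sleep.
 */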

static void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	disable_local_APIC();
	if (hlt_works(smp_processor_id()))
		for (;;) halt();
	for (;;);
}

/*
 * This function calls the 'stop' function on all other CPUs in the system.
 */
static void native_smp_send_stop(void)
{
	int nolock;
	unsigned long flags;

	if (reboot_force)
		return;

	/* Don't deadlock on the call lock in panic */
	nolock = !spin_trylock(&call_lock);
	local_irq_save(flags);
	__smp_call_function(stop_this_cpu, NULL, 0, 0);
	if (!nolock)
		spin_unlock(&call_lock);
	disable_local_APIC();
	local_irq_restore(flags);
}

/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
void smp_reschedule_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
#ifdef CONFIG_X86_32
	__get_cpu_var(irq_stat).irq_resched_count++;
#else
	add_pda(irq_resched_count, 1);
#endif
}

void smp_call_function_interrupt(struct pt_regs *regs)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	ack_APIC_irq();
	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function.
	 */
	mb();
	atomic_inc(&call_data->started);
	/*
	 * At this point the info structure may be out of scope unless wait==1.
	 */
	irq_enter();
	(*func)(info);
#ifdef CONFIG_X86_32
	__get_cpu_var(irq_stat).irq_call_count++;
#else
	add_pda(irq_call_count, 1);
#endif
	irq_exit();

	if (wait) {
		mb();
		atomic_inc(&call_data->finished);
	}
}
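
/*
 * Editorial note on the barriers above (an interpretation, not text
 * from the original): the mb() before the 'started' increment keeps
 * the reads of call_data->func/info from being reordered past the
 * increment that lets the initiator reuse its on-stack structure; it
 * pairs with the wmb()/mb() the initiator issues after publishing
 * call_data. The mb() before 'finished' likewise makes @func's side
 * effects visible before the waiting initiator proceeds.
 */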

struct smp_ops smp_ops = {
	.smp_prepare_boot_cpu	= native_smp_prepare_boot_cpu,
	.smp_prepare_cpus	= native_smp_prepare_cpus,
	.cpu_up			= native_cpu_up,
	.smp_cpus_done		= native_smp_cpus_done,

	.smp_send_stop		= native_smp_send_stop,
	.smp_send_reschedule	= native_smp_send_reschedule,
	.smp_call_function_mask	= native_smp_call_function_mask,
};
EXPORT_SYMBOL_GPL(smp_ops);
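
/*
 * Because smp_ops is a writable, exported ops table, a paravirtualized
 * platform can substitute its own IPI primitives at boot. A minimal
 * sketch, with hypothetical names (guest_smp_send_reschedule and
 * guest_init_smp_ops are illustrative, not from this file):
 *
 *	static void guest_smp_send_reschedule(int cpu)
 *	{
 *		// e.g. issue a hypercall instead of a local-APIC IPI
 *	}
 *
 *	static void __init guest_init_smp_ops(void)
 *	{
 *		smp_ops.smp_send_reschedule = guest_smp_send_reschedule;
 *	}
 */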