Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* -*- mode: c; c-basic-offset: 8 -*- */ |
2 | ||
3 | /* Copyright (C) 1999,2001 | |
4 | * | |
5 | * Author: J.E.J.Bottomley@HansenPartnership.com | |
6 | * | |
1da177e4 LT |
7 | * This file provides all the same external entries as smp.c but uses |
8 | * the voyager hal to provide the functionality | |
9 | */ | |
6cd10f8d | 10 | #include <linux/cpu.h> |
153f8057 | 11 | #include <linux/module.h> |
1da177e4 LT |
12 | #include <linux/mm.h> |
13 | #include <linux/kernel_stat.h> | |
14 | #include <linux/delay.h> | |
15 | #include <linux/mc146818rtc.h> | |
16 | #include <linux/cache.h> | |
17 | #include <linux/interrupt.h> | |
1da177e4 LT |
18 | #include <linux/init.h> |
19 | #include <linux/kernel.h> | |
20 | #include <linux/bootmem.h> | |
21 | #include <linux/completion.h> | |
22 | #include <asm/desc.h> | |
23 | #include <asm/voyager.h> | |
24 | #include <asm/vic.h> | |
25 | #include <asm/mtrr.h> | |
26 | #include <asm/pgalloc.h> | |
27 | #include <asm/tlbflush.h> | |
28 | #include <asm/arch_hooks.h> | |
e44b7b75 | 29 | #include <asm/trampoline.h> |
1da177e4 | 30 | |
1da177e4 | 31 | /* TLB state -- visible externally, indexed physically */ |
0cca1ca6 | 32 | DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { &init_mm, 0 }; |
1da177e4 LT |
33 | |
34 | /* CPU IRQ affinity -- set to all ones initially */ | |
a4ec1eff IM |
35 | static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned = |
36 | {[0 ... NR_CPUS-1] = ~0UL }; | |
1da177e4 LT |
37 | |
38 | /* per CPU data structure (for /proc/cpuinfo et al), visible externally | |
39 | * indexed physically */ | |
0cca1ca6 | 40 | DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); |
92cb7612 | 41 | EXPORT_PER_CPU_SYMBOL(cpu_info); |
1da177e4 LT |
42 | |
43 | /* physical ID of the CPU used to boot the system */ | |
44 | unsigned char boot_cpu_id; | |
45 | ||
46 | /* The memory line addresses for the Quad CPIs */ | |
47 | struct voyager_qic_cpi *voyager_quad_cpi_addr[NR_CPUS] __cacheline_aligned; | |
48 | ||
49 | /* The masks for the Extended VIC processors, filled in by cat_init */ | |
50 | __u32 voyager_extended_vic_processors = 0; | |
51 | ||
52 | /* Masks for the extended Quad processors which cannot be VIC booted */ | |
53 | __u32 voyager_allowed_boot_processors = 0; | |
54 | ||
55 | /* The mask for the Quad Processors (both extended and non-extended) */ | |
56 | __u32 voyager_quad_processors = 0; | |
57 | ||
58 | /* Total count of live CPUs, used in process.c to display | |
59 | * the CPU information and in irq.c for the per CPU irq | |
60 | * activity count. Finally exported by i386_ksyms.c */ | |
61 | static int voyager_extended_cpus = 1; | |
62 | ||
1da177e4 LT |
63 | /* Used for the invalidate map that's also checked in the spinlock */ |
64 | static volatile unsigned long smp_invalidate_needed; | |
65 | ||
1da177e4 LT |
66 | /* Bitmask of CPUs present in the system - exported by i386_ksyms.c, used
67 | * by scheduler but indexed physically */ | |
68 | cpumask_t phys_cpu_present_map = CPU_MASK_NONE; | |
69 | ||
1da177e4 LT |
70 | /* The internal functions */ |
71 | static void send_CPI(__u32 cpuset, __u8 cpi); | |
72 | static void ack_CPI(__u8 cpi); | |
73 | static int ack_QIC_CPI(__u8 cpi); | |
74 | static void ack_special_QIC_CPI(__u8 cpi); | |
75 | static void ack_VIC_CPI(__u8 cpi); | |
76 | static void send_CPI_allbutself(__u8 cpi); | |
c771746e JB |
77 | static void mask_vic_irq(unsigned int irq); |
78 | static void unmask_vic_irq(unsigned int irq); | |
1da177e4 LT |
79 | static unsigned int startup_vic_irq(unsigned int irq); |
80 | static void enable_local_vic_irq(unsigned int irq); | |
81 | static void disable_local_vic_irq(unsigned int irq); | |
82 | static void before_handle_vic_irq(unsigned int irq); | |
83 | static void after_handle_vic_irq(unsigned int irq); | |
84 | static void set_vic_irq_affinity(unsigned int irq, cpumask_t mask); | |
85 | static void ack_vic_irq(unsigned int irq); | |
86 | static void vic_enable_cpi(void); | |
87 | static void do_boot_cpu(__u8 cpuid); | |
88 | static void do_quad_bootstrap(void); | |
08c33308 | 89 | static void initialize_secondary(void); |
1da177e4 LT |
90 | |
91 | int hard_smp_processor_id(void); | |
2654c08c | 92 | int safe_smp_processor_id(void); |
1da177e4 LT |
93 | |
94 | /* Inline functions */ | |
a4ec1eff | 95 | static inline void send_one_QIC_CPI(__u8 cpu, __u8 cpi) |
1da177e4 LT |
96 | { |
97 | voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi = | |
a4ec1eff | 98 | (smp_processor_id() << 16) + cpi; |
1da177e4 LT |
99 | } |
100 | ||
a4ec1eff | 101 | static inline void send_QIC_CPI(__u32 cpuset, __u8 cpi) |
1da177e4 LT |
102 | { |
103 | int cpu; | |
104 | ||
105 | for_each_online_cpu(cpu) { | |
a4ec1eff | 106 | if (cpuset & (1 << cpu)) { |
1da177e4 | 107 | #ifdef VOYAGER_DEBUG |
7c04e64a | 108 | if (!cpu_online(cpu)) |
a4ec1eff IM |
109 | VDEBUG(("CPU%d sending cpi %d to CPU%d not in " |
110 | "cpu_online_map\n", | |
111 | hard_smp_processor_id(), cpi, cpu)); | |
1da177e4 LT |
112 | #endif |
113 | send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET); | |
114 | } | |
115 | } | |
116 | } | |
117 | ||
a4ec1eff | 118 | static inline void wrapper_smp_local_timer_interrupt(void) |
6431e6a2 DH |
119 | { |
120 | irq_enter(); | |
7d12e780 | 121 | smp_local_timer_interrupt(); |
6431e6a2 DH |
122 | irq_exit(); |
123 | } | |
124 | ||
a4ec1eff | 125 | static inline void send_one_CPI(__u8 cpu, __u8 cpi) |
1da177e4 | 126 | { |
a4ec1eff | 127 | if (voyager_quad_processors & (1 << cpu)) |
1da177e4 LT |
128 | send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET); |
129 | else | |
a4ec1eff | 130 | send_CPI(1 << cpu, cpi); |
1da177e4 LT |
131 | } |
132 | ||
a4ec1eff | 133 | static inline void send_CPI_allbutself(__u8 cpi) |
1da177e4 LT |
134 | { |
135 | __u8 cpu = smp_processor_id(); | |
136 | __u32 mask = cpus_addr(cpu_online_map)[0] & ~(1 << cpu); | |
137 | send_CPI(mask, cpi); | |
138 | } | |
139 | ||
a4ec1eff | 140 | static inline int is_cpu_quad(void) |
1da177e4 LT |
141 | { |
142 | __u8 cpumask = inb(VIC_PROC_WHO_AM_I); | |
143 | return ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER); | |
144 | } | |
145 | ||
a4ec1eff | 146 | static inline int is_cpu_extended(void) |
1da177e4 LT |
147 | { |
148 | __u8 cpu = hard_smp_processor_id(); | |
149 | ||
a4ec1eff | 150 | return (voyager_extended_vic_processors & (1 << cpu)); |
1da177e4 LT |
151 | } |
152 | ||
a4ec1eff | 153 | static inline int is_cpu_vic_boot(void) |
1da177e4 LT |
154 | { |
155 | __u8 cpu = hard_smp_processor_id(); | |
156 | ||
a4ec1eff IM |
157 | return (voyager_extended_vic_processors |
158 | & voyager_allowed_boot_processors & (1 << cpu)); | |
1da177e4 LT |
159 | } |
160 | ||
a4ec1eff | 161 | static inline void ack_CPI(__u8 cpi) |
1da177e4 | 162 | { |
a4ec1eff | 163 | switch (cpi) { |
1da177e4 | 164 | case VIC_CPU_BOOT_CPI: |
a4ec1eff | 165 | if (is_cpu_quad() && !is_cpu_vic_boot()) |
1da177e4 LT |
166 | ack_QIC_CPI(cpi); |
167 | else | |
168 | ack_VIC_CPI(cpi); | |
169 | break; | |
170 | case VIC_SYS_INT: | |
a4ec1eff | 171 | case VIC_CMN_INT: |
1da177e4 LT |
172 | /* These are slightly strange. Even on the Quad card, |
173 | * they are vectored as VIC CPIs */
a4ec1eff | 174 | if (is_cpu_quad()) |
1da177e4 LT |
175 | ack_special_QIC_CPI(cpi); |
176 | else | |
177 | ack_VIC_CPI(cpi); | |
178 | break; | |
179 | default: | |
180 | printk("VOYAGER ERROR: CPI%d is in common CPI code\n", cpi); | |
181 | break; | |
182 | } | |
183 | } | |
184 | ||
185 | /* local variables */ | |
186 | ||
187 | /* The VIC IRQ descriptors -- these look almost identical to the | |
188 | * 8259 IRQs except that masks and things must be kept per processor | |
189 | */ | |
c771746e | 190 | static struct irq_chip vic_chip = { |
a4ec1eff IM |
191 | .name = "VIC", |
192 | .startup = startup_vic_irq, | |
193 | .mask = mask_vic_irq, | |
194 | .unmask = unmask_vic_irq, | |
195 | .set_affinity = set_vic_irq_affinity, | |
1da177e4 LT |
196 | }; |
197 | ||
198 | /* used to count up as CPUs are brought on line (starts at 0) */ | |
199 | static int cpucount = 0; | |
200 | ||
1da177e4 LT |
201 | /* The per cpu profile stuff - used in smp_local_timer_interrupt */ |
202 | static DEFINE_PER_CPU(int, prof_multiplier) = 1; | |
203 | static DEFINE_PER_CPU(int, prof_old_multiplier) = 1; | |
a4ec1eff | 204 | static DEFINE_PER_CPU(int, prof_counter) = 1; |
1da177e4 LT |
205 | |
206 | /* the map used to check if a CPU has booted */ | |
207 | static __u32 cpu_booted_map; | |
208 | ||
209 | /* the synchronize flag used to hold all secondary CPUs spinning in | |
210 | * a tight loop until the boot sequence is ready for them */ | |
211 | static cpumask_t smp_commenced_mask = CPU_MASK_NONE; | |
212 | ||
213 | /* This is for the new dynamic CPU boot code */ | |
214 | cpumask_t cpu_callin_map = CPU_MASK_NONE; | |
215 | cpumask_t cpu_callout_map = CPU_MASK_NONE; | |
216 | ||
217 | /* The per processor IRQ masks (these are usually kept in sync) */ | |
218 | static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned; | |
219 | ||
220 | /* the list of IRQs to be enabled by the VIC_ENABLE_IRQ_CPI */ | |
221 | static __u16 vic_irq_enable_mask[NR_CPUS] __cacheline_aligned = { 0 }; | |
222 | ||
223 | /* Lock for enable/disable of VIC interrupts */ | |
a4ec1eff | 224 | static __cacheline_aligned DEFINE_SPINLOCK(vic_irq_lock); |
1da177e4 | 225 | |
a4ec1eff | 226 | /* The boot processor is correctly set up in PC mode when it |
1da177e4 LT |
227 | * comes up, but the secondaries need their master/slave 8259 |
228 | * pairs initializing correctly */ | |
229 | ||
230 | /* Interrupt counters (per cpu) and total - used to try to | |
231 | * even up the interrupt handling routines */ | |
232 | static long vic_intr_total = 0; | |
233 | static long vic_intr_count[NR_CPUS] __cacheline_aligned = { 0 }; | |
234 | static unsigned long vic_tick[NR_CPUS] __cacheline_aligned = { 0 }; | |
235 | ||
236 | /* Since we can only use CPI0, we fake all the other CPIs */ | |
237 | static unsigned long vic_cpi_mailbox[NR_CPUS] __cacheline_aligned; | |
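/* a rough sketch of the faking, as implemented by send_CPI() and
 * smp_vic_cpi_interrupt() further down: the sender records the wanted
 * CPI as a mailbox bit, then raises the one real interrupt, and the
 * receiver drains its mailbox:
 *
 *	set_bit(cpi, &vic_cpi_mailbox[cpu]);			(sender)
 *	outb((__u8) cpuset, VIC_CPI_Registers[VIC_CPI_LEVEL0]);
 *	...
 *	if (test_and_clear_bit(VIC_TIMER_CPI,			(receiver)
 *			       &vic_cpi_mailbox[cpu]))
 *		wrapper_smp_local_timer_interrupt();
 */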
238 | ||
239 | /* debugging routine to read the isr of the cpu's pic */ | |
a4ec1eff | 240 | static inline __u16 vic_read_isr(void) |
1da177e4 LT |
241 | { |
242 | __u16 isr; | |
243 | ||
244 | outb(0x0b, 0xa0); | |
245 | isr = inb(0xa0) << 8; | |
246 | outb(0x0b, 0x20); | |
247 | isr |= inb(0x20); | |
248 | ||
249 | return isr; | |
250 | } | |
251 | ||
a4ec1eff | 252 | static __init void qic_setup(void) |
1da177e4 | 253 | { |
a4ec1eff | 254 | if (!is_cpu_quad()) { |
1da177e4 LT |
255 | /* not a quad, no setup */ |
256 | return; | |
257 | } | |
258 | outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0); | |
259 | outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1); | |
a4ec1eff IM |
260 | |
261 | if (is_cpu_extended()) { | |
1da177e4 LT |
262 | /* the QIC duplicate of the VIC base register */ |
263 | outb(VIC_DEFAULT_CPI_BASE, QIC_VIC_CPI_BASE_REGISTER); | |
264 | outb(QIC_DEFAULT_CPI_BASE, QIC_CPI_BASE_REGISTER); | |
265 | ||
266 | /* FIXME: should set up the QIC timer and memory parity | |
267 | * error vectors here */ | |
268 | } | |
269 | } | |
270 | ||
a4ec1eff | 271 | static __init void vic_setup_pic(void) |
1da177e4 LT |
272 | { |
273 | outb(1, VIC_REDIRECT_REGISTER_1); | |
274 | /* clear the claim registers for dynamic routing */ | |
275 | outb(0, VIC_CLAIM_REGISTER_0); | |
276 | outb(0, VIC_CLAIM_REGISTER_1); | |
277 | ||
278 | outb(0, VIC_PRIORITY_REGISTER); | |
279 | /* Set the Primary and Secondary Microchannel vector | |
280 | * bases to be the same as the ordinary interrupts | |
281 | * | |
282 | * FIXME: This would be more efficient using separate | |
283 | * vectors. */ | |
284 | outb(FIRST_EXTERNAL_VECTOR, VIC_PRIMARY_MC_BASE); | |
285 | outb(FIRST_EXTERNAL_VECTOR, VIC_SECONDARY_MC_BASE); | |
286 | /* Now initialise the master PIC belonging to this CPU by
287 | * sending the four ICWs */ | |
288 | ||
289 | /* ICW1: level triggered, ICW4 needed */ | |
290 | outb(0x19, 0x20); | |
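	/* decoding 0x19: bit 4 marks the byte as ICW1, bit 3 (LTIM)
	 * selects level triggering, bit 0 (IC4) announces the ICW4
	 * below; bit 1 (SNGL) clear means a cascaded master/slave pair */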
291 | ||
292 | /* ICW2: vector base */ | |
293 | outb(FIRST_EXTERNAL_VECTOR, 0x21); | |
294 | ||
295 | /* ICW3: slave at line 2 */ | |
296 | outb(0x04, 0x21); | |
297 | ||
298 | /* ICW4: 8086 mode */ | |
299 | outb(0x01, 0x21); | |
300 | ||
301 | /* now the same for the slave PIC */ | |
302 | ||
303 | /* ICW1: level trigger, ICW4 needed */ | |
304 | outb(0x19, 0xA0); | |
305 | ||
306 | /* ICW2: slave vector base */ | |
307 | outb(FIRST_EXTERNAL_VECTOR + 8, 0xA1); | |
a4ec1eff | 308 | |
1da177e4 LT |
309 | /* ICW3: slave ID */ |
310 | outb(0x02, 0xA1); | |
311 | ||
312 | /* ICW4: 8086 mode */ | |
313 | outb(0x01, 0xA1); | |
314 | } | |
315 | ||
a4ec1eff | 316 | static void do_quad_bootstrap(void) |
1da177e4 | 317 | { |
a4ec1eff | 318 | if (is_cpu_quad() && is_cpu_vic_boot()) { |
1da177e4 LT |
319 | int i; |
320 | unsigned long flags; | |
321 | __u8 cpuid = hard_smp_processor_id(); | |
322 | ||
323 | local_irq_save(flags); | |
324 | ||
a4ec1eff | 325 | for (i = 0; i < 4; i++) { |
1da177e4 | 326 | /* FIXME: this would be >>3 &0x7 on the 32 way */ |
a4ec1eff | 327 | if (((cpuid >> 2) & 0x03) == i) |
1da177e4 LT |
328 | /* don't lower our own mask! */ |
329 | continue; | |
330 | ||
331 | /* masquerade as local Quad CPU */ | |
332 | outb(QIC_CPUID_ENABLE | i, QIC_PROCESSOR_ID); | |
333 | /* enable the startup CPI */ | |
334 | outb(QIC_BOOT_CPI_MASK, QIC_MASK_REGISTER1); | |
335 | /* restore cpu id */ | |
336 | outb(0, QIC_PROCESSOR_ID); | |
337 | } | |
338 | local_irq_restore(flags); | |
339 | } | |
340 | } | |
341 | ||
ee477524 JB |
342 | void prefill_possible_map(void) |
343 | { | |
344 | /* This is empty on voyager because we need a much | |
345 | * earlier detection which is done in find_smp_config */ | |
346 | } | |
347 | ||
1da177e4 LT |
348 | /* Set up all the basic stuff: read the SMP config and make all the |
349 | * SMP information reflect only the boot cpu. All others will be | |
350 | * brought on-line later. */ | |
a4ec1eff | 351 | void __init find_smp_config(void) |
1da177e4 LT |
352 | { |
353 | int i; | |
354 | ||
355 | boot_cpu_id = hard_smp_processor_id(); | |
356 | ||
357 | printk("VOYAGER SMP: Boot cpu is %d\n", boot_cpu_id); | |
358 | ||
359 | /* initialize the CPU structures (moved from smp_boot_cpus) */ | |
9628937d | 360 | for (i = 0; i < nr_cpu_ids; i++) |
1da177e4 | 361 | cpu_irq_affinity[i] = ~0; |
1da177e4 LT |
362 | cpu_online_map = cpumask_of_cpu(boot_cpu_id); |
363 | ||
364 | /* The boot CPU must be extended */ | |
a4ec1eff | 365 | voyager_extended_vic_processors = 1 << boot_cpu_id; |
27b46d76 | 366 | /* initially, all of the first 8 CPUs can boot */ |
1da177e4 LT |
367 | voyager_allowed_boot_processors = 0xff; |
368 | /* set up everything for just this CPU; we can alter
369 | * this as we start the other CPUs later */ | |
370 | /* now get the CPU disposition from the extended CMOS */ | |
a4ec1eff IM |
371 | cpus_addr(phys_cpu_present_map)[0] = |
372 | voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK); | |
373 | cpus_addr(phys_cpu_present_map)[0] |= | |
374 | voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8; | |
375 | cpus_addr(phys_cpu_present_map)[0] |= | |
376 | voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + | |
377 | 2) << 16; | |
378 | cpus_addr(phys_cpu_present_map)[0] |= | |
379 | voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + | |
380 | 3) << 24; | |
f68a106f | 381 | cpu_possible_map = phys_cpu_present_map; |
a4ec1eff IM |
382 | printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n", |
383 | cpus_addr(phys_cpu_present_map)[0]); | |
1da177e4 LT |
384 | /* Here we set up the VIC to enable SMP */ |
385 | /* enable the CPIs by writing the base vector to their register */ | |
386 | outb(VIC_DEFAULT_CPI_BASE, VIC_CPI_BASE_REGISTER); | |
387 | outb(1, VIC_REDIRECT_REGISTER_1); | |
388 | /* set the claim registers for static routing --- Boot CPU gets | |
389 | * all interrupts until all other CPUs have started */
390 | outb(0xff, VIC_CLAIM_REGISTER_0); | |
391 | outb(0xff, VIC_CLAIM_REGISTER_1); | |
392 | /* Set the Primary and Secondary Microchannel vector | |
393 | * bases to be the same as the ordinary interrupts | |
394 | * | |
395 | * FIXME: This would be more efficient using separate | |
396 | * vectors. */ | |
397 | outb(FIRST_EXTERNAL_VECTOR, VIC_PRIMARY_MC_BASE); | |
398 | outb(FIRST_EXTERNAL_VECTOR, VIC_SECONDARY_MC_BASE); | |
399 | ||
400 | /* Finally tell the firmware that we're driving */ | |
401 | outb(inb(VOYAGER_SUS_IN_CONTROL_PORT) | VOYAGER_IN_CONTROL_FLAG, | |
402 | VOYAGER_SUS_IN_CONTROL_PORT); | |
403 | ||
404 | current_thread_info()->cpu = boot_cpu_id; | |
6a3ee3d5 | 405 | x86_write_percpu(cpu_number, boot_cpu_id); |
1da177e4 LT |
406 | } |
407 | ||
408 | /* | |
409 | * The bootstrap kernel entry code has set these up. Save them | |
410 | * for a given CPU; id is physical */
a4ec1eff | 411 | void __init smp_store_cpu_info(int id) |
1da177e4 | 412 | { |
92cb7612 | 413 | struct cpuinfo_x86 *c = &cpu_data(id); |
1da177e4 LT |
414 | |
415 | *c = boot_cpu_data; | |
bfcb4c1b | 416 | c->cpu_index = id; |
1da177e4 | 417 | |
6a3ee3d5 | 418 | identify_secondary_cpu(c); |
1da177e4 LT |
419 | } |
420 | ||
1da177e4 | 421 | /* Routine initially called when a non-boot CPU is brought online */ |
a4ec1eff | 422 | static void __init start_secondary(void *unused) |
1da177e4 LT |
423 | { |
424 | __u8 cpuid = hard_smp_processor_id(); | |
1da177e4 | 425 | |
6a3ee3d5 | 426 | cpu_init(); |
1da177e4 LT |
427 | |
428 | /* OK, we're in the routine */ | |
429 | ack_CPI(VIC_CPU_BOOT_CPI); | |
430 | ||
431 | /* setup the 8259 master slave pair belonging to this CPU --- | |
a4ec1eff IM |
432 | * we won't actually receive any until the boot CPU |
433 | * relinquishes its static routing mask */
1da177e4 LT |
434 | vic_setup_pic(); |
435 | ||
436 | qic_setup(); | |
437 | ||
a4ec1eff | 438 | if (is_cpu_quad() && !is_cpu_vic_boot()) { |
1da177e4 LT |
439 | /* clear the boot CPI */ |
440 | __u8 dummy; | |
441 | ||
a4ec1eff IM |
442 | dummy = |
443 | voyager_quad_cpi_addr[cpuid]->qic_cpi[VIC_CPU_BOOT_CPI].cpi; | |
1da177e4 LT |
444 | printk("read dummy %d\n", dummy); |
445 | } | |
446 | ||
447 | /* lower the mask to receive CPIs */ | |
448 | vic_enable_cpi(); | |
449 | ||
450 | VDEBUG(("VOYAGER SMP: CPU%d, stack at about %p\n", cpuid, &cpuid)); | |
451 | ||
e545a614 MS |
452 | notify_cpu_starting(cpuid); |
453 | ||
1da177e4 LT |
454 | /* enable interrupts */ |
455 | local_irq_enable(); | |
456 | ||
457 | /* get our bogomips */ | |
458 | calibrate_delay(); | |
459 | ||
460 | /* save our processor parameters */ | |
461 | smp_store_cpu_info(cpuid); | |
462 | ||
463 | /* if we're a quad, we may need to bootstrap other CPUs */ | |
464 | do_quad_bootstrap(); | |
465 | ||
466 | /* FIXME: this is rather a poor hack to prevent the CPU | |
467 | * activating softirqs while it's supposed to be waiting for | |
468 | * permission to proceed. Without this, the new per CPU stuff | |
469 | * in the softirqs will fail */ | |
470 | local_irq_disable(); | |
471 | cpu_set(cpuid, cpu_callin_map); | |
472 | ||
473 | /* signal that we're done */ | |
474 | cpu_booted_map = 1; | |
475 | ||
476 | while (!cpu_isset(cpuid, smp_commenced_mask)) | |
477 | rep_nop(); | |
478 | local_irq_enable(); | |
479 | ||
480 | local_flush_tlb(); | |
481 | ||
482 | cpu_set(cpuid, cpu_online_map); | |
483 | wmb(); | |
484 | cpu_idle(); | |
485 | } | |
486 | ||
1da177e4 LT |
487 | /* Routine to kick start the given CPU and wait for it to report ready |
488 | * (or timeout in startup). When this routine returns, the requested | |
489 | * CPU is either fully running and configured or known to be dead. | |
490 | * | |
491 | * We call this routine sequentially 1 CPU at a time, so no need for | |
492 | * locking */ | |
493 | ||
a4ec1eff | 494 | static void __init do_boot_cpu(__u8 cpu) |
1da177e4 LT |
495 | { |
496 | struct task_struct *idle; | |
497 | int timeout; | |
498 | unsigned long flags; | |
a4ec1eff IM |
499 | int quad_boot = (1 << cpu) & voyager_quad_processors |
500 | & ~(voyager_extended_vic_processors | |
501 | & voyager_allowed_boot_processors); | |
1da177e4 | 502 | |
1da177e4 LT |
503 | /* This is the format of the CPI IDT gate (in real mode) which |
504 | * we're hijacking to boot the CPU */ | |
a4ec1eff | 505 | union IDTFormat { |
1da177e4 | 506 | struct seg { |
a4ec1eff IM |
507 | __u16 Offset; |
508 | __u16 Segment; | |
1da177e4 LT |
509 | } idt; |
510 | __u32 val; | |
511 | } hijack_source; | |
512 | ||
513 | __u32 *hijack_vector; | |
514 | __u32 start_phys_address = setup_trampoline(); | |
515 | ||
516 | /* There's a clever trick to this: The linux trampoline is | |
517 | * compiled to begin at absolute location zero, so make the | |
518 | * address zero but have the segment selector compensate
519 | * for the actual address */ | |
520 | hijack_source.idt.Offset = start_phys_address & 0x000F; | |
521 | hijack_source.idt.Segment = (start_phys_address >> 4) & 0xFFFF; | |
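	/* illustrative address only: had setup_trampoline() returned
	 * 0x9F000, the hijacked vector would be 9F00:0000, which real
	 * mode resolves as 0x9F00 * 16 + 0 = physical 0x9F000 while
	 * the trampoline itself still executes from offset zero */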
522 | ||
523 | cpucount++; | |
d6444514 JB |
524 | alternatives_smp_switch(1); |
525 | ||
1da177e4 | 526 | idle = fork_idle(cpu); |
a4ec1eff | 527 | if (IS_ERR(idle)) |
1da177e4 | 528 | panic("failed fork for CPU%d", cpu); |
65ea5b03 | 529 | idle->thread.ip = (unsigned long)start_secondary; |
1da177e4 | 530 | /* init_tasks (in sched.c) is indexed logically */ |
65ea5b03 | 531 | stack_start.sp = (void *)idle->thread.sp; |
1da177e4 | 532 | |
6a3ee3d5 | 533 | init_gdt(cpu); |
a4ec1eff | 534 | per_cpu(current_task, cpu) = idle; |
6a3ee3d5 | 535 | early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); |
1da177e4 LT |
536 | irq_ctx_init(cpu); |
537 | ||
538 | /* Note: Don't modify initial ss override */ | |
a4ec1eff | 539 | VDEBUG(("VOYAGER SMP: Booting CPU%d at 0x%lx[%x:%x], stack %p\n", cpu, |
1da177e4 | 540 | (unsigned long)hijack_source.val, hijack_source.idt.Segment, |
65ea5b03 | 541 | hijack_source.idt.Offset, stack_start.sp)); |
9d0e59a3 EB |
542 | |
543 | /* init lowmem identity mapping */ | |
68db065c JF |
544 | clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY, |
545 | min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY)); | |
9d0e59a3 | 546 | flush_tlb_all(); |
1da177e4 | 547 | |
a4ec1eff | 548 | if (quad_boot) { |
1da177e4 | 549 | printk("CPU %d: non extended Quad boot\n", cpu); |
a4ec1eff IM |
550 | hijack_vector = |
551 | (__u32 *) | |
552 | phys_to_virt((VIC_CPU_BOOT_CPI + QIC_DEFAULT_CPI_BASE) * 4); | |
1da177e4 LT |
553 | *hijack_vector = hijack_source.val; |
554 | } else { | |
555 | printk("CPU%d: extended VIC boot\n", cpu); | |
a4ec1eff IM |
556 | hijack_vector = |
557 | (__u32 *) | |
558 | phys_to_virt((VIC_CPU_BOOT_CPI + VIC_DEFAULT_CPI_BASE) * 4); | |
1da177e4 LT |
559 | *hijack_vector = hijack_source.val; |
560 | /* VIC errata, may also receive interrupt at this address */ | |
a4ec1eff IM |
561 | hijack_vector = |
562 | (__u32 *) | |
563 | phys_to_virt((VIC_CPU_BOOT_ERRATA_CPI + | |
564 | VIC_DEFAULT_CPI_BASE) * 4); | |
1da177e4 LT |
565 | *hijack_vector = hijack_source.val; |
566 | } | |
567 | /* All non-boot CPUs start with interrupts fully masked. Need | |
568 | * to lower the mask of the CPI we're about to send. We do | |
569 | * this in the VIC by masquerading as the processor we're | |
570 | * about to boot and lowering its interrupt mask */ | |
571 | local_irq_save(flags); | |
a4ec1eff | 572 | if (quad_boot) { |
1da177e4 LT |
573 | send_one_QIC_CPI(cpu, VIC_CPU_BOOT_CPI); |
574 | } else { | |
575 | outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID); | |
576 | /* here we're altering registers belonging to `cpu' */ | |
a4ec1eff | 577 | |
1da177e4 LT |
578 | outb(VIC_BOOT_INTERRUPT_MASK, 0x21); |
579 | /* now go back to our original identity */ | |
580 | outb(boot_cpu_id, VIC_PROCESSOR_ID); | |
581 | ||
582 | /* and boot the CPU */ | |
583 | ||
a4ec1eff | 584 | send_CPI((1 << cpu), VIC_CPU_BOOT_CPI); |
1da177e4 LT |
585 | } |
586 | cpu_booted_map = 0; | |
587 | local_irq_restore(flags); | |
588 | ||
589 | /* now wait for it to become ready (or timeout) */ | |
a4ec1eff IM |
590 | for (timeout = 0; timeout < 50000; timeout++) { |
591 | if (cpu_booted_map) | |
1da177e4 LT |
592 | break; |
593 | udelay(100); | |
594 | } | |
595 | /* reset the page table */ | |
9d0e59a3 | 596 | zap_low_mappings(); |
a4ec1eff | 597 | |
1da177e4 LT |
598 | if (cpu_booted_map) { |
599 | VDEBUG(("CPU%d: Booted successfully, back in CPU %d\n", | |
600 | cpu, smp_processor_id())); | |
a4ec1eff | 601 | |
1da177e4 | 602 | printk("CPU%d: ", cpu); |
92cb7612 | 603 | print_cpu_info(&cpu_data(cpu)); |
1da177e4 LT |
604 | wmb(); |
605 | cpu_set(cpu, cpu_callout_map); | |
3c101cf0 | 606 | cpu_set(cpu, cpu_present_map); |
a4ec1eff | 607 | } else { |
1da177e4 | 608 | printk("CPU%d FAILED TO BOOT: ", cpu); |
a4ec1eff IM |
609 | if (* |
610 | ((volatile unsigned char *)phys_to_virt(start_phys_address)) | |
611 | == 0xA5) | |
1da177e4 LT |
612 | printk("Stuck.\n"); |
613 | else | |
614 | printk("Not responding.\n"); | |
a4ec1eff | 615 | |
1da177e4 LT |
616 | cpucount--; |
617 | } | |
618 | } | |
619 | ||
a4ec1eff | 620 | void __init smp_boot_cpus(void) |
1da177e4 LT |
621 | { |
622 | int i; | |
623 | ||
624 | /* CAT BUS initialisation must be done after the memory is set up */
625 | /* FIXME: The L4 has a catbus too, it just needs to be | |
626 | * accessed in a totally different way */ | |
a4ec1eff | 627 | if (voyager_level == 5) { |
1da177e4 LT |
628 | voyager_cat_init(); |
629 | ||
630 | /* now that the cat has probed the Voyager System Bus, sanity | |
631 | * check the cpu map */ | |
a4ec1eff IM |
632 | if (((voyager_quad_processors | voyager_extended_vic_processors) |
633 | & cpus_addr(phys_cpu_present_map)[0]) != | |
634 | cpus_addr(phys_cpu_present_map)[0]) { | |
1da177e4 | 635 | /* should panic */ |
a4ec1eff IM |
636 | printk("\n\n***WARNING*** " |
637 | "Sanity check of CPU present map FAILED\n"); | |
1da177e4 | 638 | } |
a4ec1eff IM |
639 | } else if (voyager_level == 4) |
640 | voyager_extended_vic_processors = | |
641 | cpus_addr(phys_cpu_present_map)[0]; | |
1da177e4 LT |
642 | |
643 | /* this sets up the idle task to run on the current cpu */ | |
644 | voyager_extended_cpus = 1; | |
645 | /* Remove the global_irq_holder setting, it triggers a BUG() on | |
646 | * schedule at the moment */ | |
647 | //global_irq_holder = boot_cpu_id; | |
648 | ||
649 | /* FIXME: Need to do something about this but currently only works | |
a4ec1eff IM |
650 | * on CPUs with a tsc which none of mine have. |
651 | smp_tune_scheduling(); | |
1da177e4 LT |
652 | */ |
653 | smp_store_cpu_info(boot_cpu_id); | |
08c33308 JB |
654 | /* setup the jump vector */ |
655 | initial_code = (unsigned long)initialize_secondary; | |
1da177e4 | 656 | printk("CPU%d: ", boot_cpu_id); |
92cb7612 | 657 | print_cpu_info(&cpu_data(boot_cpu_id)); |
1da177e4 | 658 | |
a4ec1eff | 659 | if (is_cpu_quad()) { |
1da177e4 LT |
660 | /* booting on a Quad CPU */ |
661 | printk("VOYAGER SMP: Boot CPU is Quad\n"); | |
662 | qic_setup(); | |
663 | do_quad_bootstrap(); | |
664 | } | |
665 | ||
666 | /* enable our own CPIs */ | |
667 | vic_enable_cpi(); | |
668 | ||
669 | cpu_set(boot_cpu_id, cpu_online_map); | |
670 | cpu_set(boot_cpu_id, cpu_callout_map); | |
a4ec1eff IM |
671 | |
672 | /* loop over all the extended VIC CPUs and boot them. The | |
1da177e4 | 673 | * Quad CPUs must be bootstrapped by their extended VIC cpu */ |
168ef543 | 674 | for (i = 0; i < nr_cpu_ids; i++) { |
a4ec1eff | 675 | if (i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map)) |
1da177e4 LT |
676 | continue; |
677 | do_boot_cpu(i); | |
678 | /* This udelay seems to be needed for the Quad boots;
679 | * don't remove unless you know what you're doing */ | |
680 | udelay(1000); | |
681 | } | |
682 | /* we could compute the total bogomips here, but why bother?
683 | * Code added from smpboot.c */ | |
684 | { | |
685 | unsigned long bogosum = 0; | |
7c04e64a AM |
686 | |
687 | for_each_online_cpu(i) | |
688 | bogosum += cpu_data(i).loops_per_jiffy; | |
a4ec1eff IM |
689 | printk(KERN_INFO "Total of %d processors activated " |
690 | "(%lu.%02lu BogoMIPS).\n", | |
691 | cpucount + 1, bogosum / (500000 / HZ), | |
692 | (bogosum / (5000 / HZ)) % 100); | |
1da177e4 LT |
693 | } |
694 | voyager_extended_cpus = hweight32(voyager_extended_vic_processors); | |
a4ec1eff IM |
695 | printk("VOYAGER: Extended (interrupt handling CPUs): " |
696 | "%d, non-extended: %d\n", voyager_extended_cpus, | |
697 | num_booting_cpus() - voyager_extended_cpus); | |
1da177e4 LT |
698 | /* that's it, switch to symmetric mode */ |
699 | outb(0, VIC_PRIORITY_REGISTER); | |
700 | outb(0, VIC_CLAIM_REGISTER_0); | |
701 | outb(0, VIC_CLAIM_REGISTER_1); | |
a4ec1eff | 702 | |
1da177e4 LT |
703 | VDEBUG(("VOYAGER SMP: Booted with %d CPUs\n", num_booting_cpus())); |
704 | } | |
705 | ||
706 | /* Reload the secondary CPU's task structure (this function does not
707 | * return) */
08c33308 | 708 | static void __init initialize_secondary(void) |
1da177e4 LT |
709 | { |
710 | #if 0 | |
711 | // AC kernels only | |
712 | set_current(hard_get_current()); | |
713 | #endif | |
714 | ||
715 | /* | |
716 | * We don't actually need to load the full TSS, | |
717 | * basically just the stack pointer and the eip. | |
718 | */ | |
719 | ||
a4ec1eff | 720 | asm volatile ("movl %0,%%esp\n\t" |
65ea5b03 PA |
721 | "jmp *%1"::"r" (current->thread.sp), |
722 | "r"(current->thread.ip)); | |
1da177e4 LT |
723 | } |
724 | ||
725 | /* handle a Voyager SYS_INT -- If we don't, the base board will | |
726 | * panic the system. | |
727 | * | |
728 | * System interrupts occur because some problem was detected on the | |
729 | * various busses. To find out what, you have to probe all the
730 | * hardware via the CAT bus. FIXME: At the moment we do nothing. */ | |
75604d7f | 731 | void smp_vic_sys_interrupt(struct pt_regs *regs) |
1da177e4 LT |
732 | { |
733 | ack_CPI(VIC_SYS_INT); | |
a4ec1eff | 734 | printk("Voyager SYSTEM INTERRUPT\n"); |
1da177e4 LT |
735 | } |
736 | ||
737 | /* Handle a voyager CMN_INT; these interrupts occur either because of
738 | * a system status change or because a single bit memory error | |
739 | * occurred. FIXME: At the moment, ignore all this. */ | |
75604d7f | 740 | void smp_vic_cmn_interrupt(struct pt_regs *regs) |
1da177e4 LT |
741 | { |
742 | static __u8 in_cmn_int = 0; | |
743 | static DEFINE_SPINLOCK(cmn_int_lock); | |
744 | ||
745 | /* common ints are broadcast, so make sure we only do this once */ | |
746 | _raw_spin_lock(&cmn_int_lock); | |
a4ec1eff | 747 | if (in_cmn_int) |
1da177e4 LT |
748 | goto unlock_end; |
749 | ||
750 | in_cmn_int++; | |
751 | _raw_spin_unlock(&cmn_int_lock); | |
752 | ||
753 | VDEBUG(("Voyager COMMON INTERRUPT\n")); | |
754 | ||
a4ec1eff | 755 | if (voyager_level == 5) |
1da177e4 LT |
756 | voyager_cat_do_common_interrupt(); |
757 | ||
758 | _raw_spin_lock(&cmn_int_lock); | |
759 | in_cmn_int = 0; | |
a4ec1eff | 760 | unlock_end: |
1da177e4 LT |
761 | _raw_spin_unlock(&cmn_int_lock); |
762 | ack_CPI(VIC_CMN_INT); | |
763 | } | |
764 | ||
765 | /* | |
766 | * Reschedule call back. Nothing to do; all the work is done
767 | * automatically when we return from the interrupt. */ | |
a4ec1eff | 768 | static void smp_reschedule_interrupt(void) |
1da177e4 LT |
769 | { |
770 | /* do nothing */ | |
771 | } | |
772 | ||
a4ec1eff | 773 | static struct mm_struct *flush_mm; |
1da177e4 LT |
774 | static unsigned long flush_va; |
775 | static DEFINE_SPINLOCK(tlbstate_lock); | |
1da177e4 LT |
776 | |
777 | /* | |
a4ec1eff | 778 | * We cannot call mmdrop() because we are in interrupt context;
1da177e4 LT |
779 | * instead update mm->cpu_vm_mask.
780 | * | |
781 | * We need to reload %cr3 since the page tables may be going | |
782 | * away from under us.
783 | */ | |
925596a0 | 784 | static inline void voyager_leave_mm(unsigned long cpu) |
1da177e4 LT |
785 | { |
786 | if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) | |
787 | BUG(); | |
788 | cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask); | |
789 | load_cr3(swapper_pg_dir); | |
790 | } | |
791 | ||
1da177e4 LT |
792 | /* |
793 | * Invalidate call-back | |
794 | */ | |
a4ec1eff | 795 | static void smp_invalidate_interrupt(void) |
1da177e4 LT |
796 | { |
797 | __u8 cpu = smp_processor_id(); | |
798 | ||
799 | if (!test_bit(cpu, &smp_invalidate_needed)) | |
800 | return; | |
801 | /* This will flood messages. Don't uncomment unless you see | |
802 | * problems with cross-cpu invalidation
a4ec1eff IM |
803 | VDEBUG(("VOYAGER SMP: CPU%d received INVALIDATE_CPI\n", |
804 | smp_processor_id())); | |
805 | */ | |
1da177e4 LT |
806 | |
807 | if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) { | |
808 | if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) { | |
0b9c99b6 | 809 | if (flush_va == TLB_FLUSH_ALL) |
1da177e4 LT |
810 | local_flush_tlb(); |
811 | else | |
812 | __flush_tlb_one(flush_va); | |
813 | } else | |
925596a0 | 814 | voyager_leave_mm(cpu); |
1da177e4 LT |
815 | } |
816 | smp_mb__before_clear_bit(); | |
817 | clear_bit(cpu, &smp_invalidate_needed); | |
818 | smp_mb__after_clear_bit(); | |
819 | } | |
820 | ||
821 | /* All the new flush operations for 2.4 */ | |
822 | ||
1da177e4 LT |
823 | /* This routine is called with a physical cpu mask */ |
824 | static void | |
a4ec1eff IM |
825 | voyager_flush_tlb_others(unsigned long cpumask, struct mm_struct *mm, |
826 | unsigned long va) | |
1da177e4 LT |
827 | { |
828 | int stuck = 50000; | |
829 | ||
830 | if (!cpumask) | |
831 | BUG(); | |
832 | if ((cpumask & cpus_addr(cpu_online_map)[0]) != cpumask) | |
833 | BUG(); | |
834 | if (cpumask & (1 << smp_processor_id())) | |
835 | BUG(); | |
836 | if (!mm) | |
837 | BUG(); | |
838 | ||
839 | spin_lock(&tlbstate_lock); | |
a4ec1eff | 840 | |
1da177e4 LT |
841 | flush_mm = mm; |
842 | flush_va = va; | |
843 | atomic_set_mask(cpumask, &smp_invalidate_needed); | |
844 | /* | |
845 | * We have to send the CPI only to | |
846 | * CPUs affected. | |
847 | */ | |
848 | send_CPI(cpumask, VIC_INVALIDATE_CPI); | |
849 | ||
850 | while (smp_invalidate_needed) { | |
851 | mb(); | |
a4ec1eff IM |
852 | if (--stuck == 0) { |
853 | printk("***WARNING*** Stuck doing invalidate CPI " | |
854 | "(CPU%d)\n", smp_processor_id()); | |
1da177e4 LT |
855 | break; |
856 | } | |
857 | } | |
858 | ||
859 | /* Uncomment only to debug invalidation problems | |
a4ec1eff IM |
860 | VDEBUG(("VOYAGER SMP: Completed invalidate CPI (CPU%d)\n", cpu)); |
861 | */ | |
1da177e4 LT |
862 | |
863 | flush_mm = NULL; | |
864 | flush_va = 0; | |
865 | spin_unlock(&tlbstate_lock); | |
866 | } | |
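/* the routine above pairs with smp_invalidate_interrupt(): the caller
 * publishes flush_mm/flush_va under tlbstate_lock, marks each target
 * CPU in smp_invalidate_needed and sends VIC_INVALIDATE_CPI; each
 * receiver flushes (or lazily leaves the mm) and clears its own bit,
 * while the sender spins until the mask drains or it gives up */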
867 | ||
a4ec1eff | 868 | void flush_tlb_current_task(void) |
1da177e4 LT |
869 | { |
870 | struct mm_struct *mm = current->mm; | |
871 | unsigned long cpu_mask; | |
872 | ||
873 | preempt_disable(); | |
874 | ||
875 | cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id()); | |
876 | local_flush_tlb(); | |
877 | if (cpu_mask) | |
0b9c99b6 | 878 | voyager_flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL); |
1da177e4 LT |
879 | |
880 | preempt_enable(); | |
881 | } | |
882 | ||
a4ec1eff | 883 | void flush_tlb_mm(struct mm_struct *mm) |
1da177e4 LT |
884 | { |
885 | unsigned long cpu_mask; | |
886 | ||
887 | preempt_disable(); | |
888 | ||
889 | cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id()); | |
890 | ||
891 | if (current->active_mm == mm) { | |
892 | if (current->mm) | |
893 | local_flush_tlb(); | |
894 | else | |
925596a0 | 895 | voyager_leave_mm(smp_processor_id()); |
1da177e4 LT |
896 | } |
897 | if (cpu_mask) | |
0b9c99b6 | 898 | voyager_flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL); |
1da177e4 LT |
899 | |
900 | preempt_enable(); | |
901 | } | |
902 | ||
a4ec1eff | 903 | void flush_tlb_page(struct vm_area_struct *vma, unsigned long va) |
1da177e4 LT |
904 | { |
905 | struct mm_struct *mm = vma->vm_mm; | |
906 | unsigned long cpu_mask; | |
907 | ||
908 | preempt_disable(); | |
909 | ||
910 | cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id()); | |
911 | if (current->active_mm == mm) { | |
a4ec1eff | 912 | if (current->mm) |
1da177e4 | 913 | __flush_tlb_one(va); |
a4ec1eff | 914 | else |
925596a0 | 915 | voyager_leave_mm(smp_processor_id()); |
1da177e4 LT |
916 | } |
917 | ||
918 | if (cpu_mask) | |
6a3ee3d5 | 919 | voyager_flush_tlb_others(cpu_mask, mm, va); |
1da177e4 LT |
920 | |
921 | preempt_enable(); | |
922 | } | |
a4ec1eff | 923 | |
153f8057 | 924 | EXPORT_SYMBOL(flush_tlb_page); |
1da177e4 LT |
925 | |
926 | /* enable the requested IRQs */ | |
a4ec1eff | 927 | static void smp_enable_irq_interrupt(void) |
1da177e4 LT |
928 | { |
929 | __u8 irq; | |
930 | __u8 cpu = get_cpu(); | |
931 | ||
932 | VDEBUG(("VOYAGER SMP: CPU%d enabling irq mask 0x%x\n", cpu, | |
a4ec1eff | 933 | vic_irq_enable_mask[cpu])); |
1da177e4 LT |
934 | |
935 | spin_lock(&vic_irq_lock); | |
a4ec1eff IM |
936 | for (irq = 0; irq < 16; irq++) { |
937 | if (vic_irq_enable_mask[cpu] & (1 << irq)) | |
1da177e4 LT |
938 | enable_local_vic_irq(irq); |
939 | } | |
940 | vic_irq_enable_mask[cpu] = 0; | |
941 | spin_unlock(&vic_irq_lock); | |
942 | ||
943 | put_cpu_no_resched(); | |
944 | } | |
a4ec1eff | 945 | |
1da177e4 LT |
946 | /* |
947 | * CPU halt call-back | |
948 | */ | |
a4ec1eff | 949 | static void smp_stop_cpu_function(void *dummy) |
1da177e4 LT |
950 | { |
951 | VDEBUG(("VOYAGER SMP: CPU%d is STOPPING\n", smp_processor_id())); | |
952 | cpu_clear(smp_processor_id(), cpu_online_map); | |
953 | local_irq_disable(); | |
a4ec1eff | 954 | for (;;) |
f2ab4461 | 955 | halt(); |
1da177e4 LT |
956 | } |
957 | ||
1da177e4 LT |
958 | /* execute a thread on a new CPU. The function to be called must be |
959 | * previously set up. This is used to schedule a function for | |
27b46d76 | 960 | * execution on all CPUs - set up the function, then broadcast a
1da177e4 | 961 | * function_interrupt CPI to come here on each CPU */ |
a4ec1eff | 962 | static void smp_call_function_interrupt(void) |
1da177e4 | 963 | { |
1da177e4 | 964 | irq_enter(); |
3b16cf87 | 965 | generic_smp_call_function_interrupt(); |
38e760a1 | 966 | __get_cpu_var(irq_stat).irq_call_count++; |
1da177e4 | 967 | irq_exit(); |
1da177e4 LT |
968 | } |
969 | ||
3b16cf87 | 970 | static void smp_call_function_single_interrupt(void) |
1da177e4 | 971 | { |
3b16cf87 JA |
972 | irq_enter(); |
973 | generic_smp_call_function_single_interrupt(); | |
974 | __get_cpu_var(irq_stat).irq_call_count++; | |
975 | irq_exit(); | |
1da177e4 | 976 | } |
0293ca81 | 977 | |
1da177e4 LT |
978 | /* Sorry about the name. In an APIC based system, the APICs |
979 | * themselves are programmed to send a timer interrupt. This is used | |
980 | * by linux to reschedule the processor. Voyager doesn't have this, | |
981 | * so we use the system clock to interrupt one processor, which in | |
982 | * turn, broadcasts a timer CPI to all the others --- we receive that | |
983 | * CPI here. We don't actually use this for counting, so losing
a4ec1eff | 984 | * ticks doesn't matter |
1da177e4 | 985 | * |
27b46d76 | 986 | * FIXME: For those CPUs which actually have a local APIC, we could |
1da177e4 LT |
987 | * try to use it to trigger this interrupt instead of having to |
988 | * broadcast the timer tick. Unfortunately, all my pentium DYADs have | |
989 | * no local APIC, so I can't do this | |
990 | * | |
991 | * This function is currently a placeholder and is unused in the code */ | |
75604d7f | 992 | void smp_apic_timer_interrupt(struct pt_regs *regs) |
1da177e4 | 993 | { |
7d12e780 DH |
994 | struct pt_regs *old_regs = set_irq_regs(regs); |
995 | wrapper_smp_local_timer_interrupt(); | |
996 | set_irq_regs(old_regs); | |
1da177e4 LT |
997 | } |
998 | ||
999 | /* All of the QUAD interrupt GATES */ | |
75604d7f | 1000 | void smp_qic_timer_interrupt(struct pt_regs *regs) |
1da177e4 | 1001 | { |
7d12e780 | 1002 | struct pt_regs *old_regs = set_irq_regs(regs); |
81c06b10 JB |
1003 | ack_QIC_CPI(QIC_TIMER_CPI); |
1004 | wrapper_smp_local_timer_interrupt(); | |
7d12e780 | 1005 | set_irq_regs(old_regs); |
1da177e4 LT |
1006 | } |
1007 | ||
75604d7f | 1008 | void smp_qic_invalidate_interrupt(struct pt_regs *regs) |
1da177e4 LT |
1009 | { |
1010 | ack_QIC_CPI(QIC_INVALIDATE_CPI); | |
1011 | smp_invalidate_interrupt(); | |
1012 | } | |
1013 | ||
75604d7f | 1014 | void smp_qic_reschedule_interrupt(struct pt_regs *regs) |
1da177e4 LT |
1015 | { |
1016 | ack_QIC_CPI(QIC_RESCHEDULE_CPI); | |
1017 | smp_reschedule_interrupt(); | |
1018 | } | |
1019 | ||
75604d7f | 1020 | void smp_qic_enable_irq_interrupt(struct pt_regs *regs) |
1da177e4 LT |
1021 | { |
1022 | ack_QIC_CPI(QIC_ENABLE_IRQ_CPI); | |
1023 | smp_enable_irq_interrupt(); | |
1024 | } | |
1025 | ||
75604d7f | 1026 | void smp_qic_call_function_interrupt(struct pt_regs *regs) |
1da177e4 LT |
1027 | { |
1028 | ack_QIC_CPI(QIC_CALL_FUNCTION_CPI); | |
1029 | smp_call_function_interrupt(); | |
1030 | } | |
1031 | ||
3b16cf87 JA |
1032 | void smp_qic_call_function_single_interrupt(struct pt_regs *regs) |
1033 | { | |
1034 | ack_QIC_CPI(QIC_CALL_FUNCTION_SINGLE_CPI); | |
1035 | smp_call_function_single_interrupt(); | |
1036 | } | |
1037 | ||
75604d7f | 1038 | void smp_vic_cpi_interrupt(struct pt_regs *regs) |
1da177e4 | 1039 | { |
7d12e780 | 1040 | struct pt_regs *old_regs = set_irq_regs(regs); |
1da177e4 LT |
1041 | __u8 cpu = smp_processor_id(); |
1042 | ||
a4ec1eff | 1043 | if (is_cpu_quad()) |
1da177e4 LT |
1044 | ack_QIC_CPI(VIC_CPI_LEVEL0); |
1045 | else | |
1046 | ack_VIC_CPI(VIC_CPI_LEVEL0); | |
1047 | ||
a4ec1eff | 1048 | if (test_and_clear_bit(VIC_TIMER_CPI, &vic_cpi_mailbox[cpu])) |
7d12e780 | 1049 | wrapper_smp_local_timer_interrupt(); |
a4ec1eff | 1050 | if (test_and_clear_bit(VIC_INVALIDATE_CPI, &vic_cpi_mailbox[cpu])) |
1da177e4 | 1051 | smp_invalidate_interrupt(); |
a4ec1eff | 1052 | if (test_and_clear_bit(VIC_RESCHEDULE_CPI, &vic_cpi_mailbox[cpu])) |
1da177e4 | 1053 | smp_reschedule_interrupt(); |
a4ec1eff | 1054 | if (test_and_clear_bit(VIC_ENABLE_IRQ_CPI, &vic_cpi_mailbox[cpu])) |
1da177e4 | 1055 | smp_enable_irq_interrupt(); |
a4ec1eff | 1056 | if (test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu])) |
1da177e4 | 1057 | smp_call_function_interrupt(); |
3b16cf87 JA |
1058 | if (test_and_clear_bit(VIC_CALL_FUNCTION_SINGLE_CPI, &vic_cpi_mailbox[cpu])) |
1059 | smp_call_function_single_interrupt(); | |
7d12e780 | 1060 | set_irq_regs(old_regs); |
1da177e4 LT |
1061 | } |
1062 | ||
a4ec1eff | 1063 | static void do_flush_tlb_all(void *info) |
1da177e4 LT |
1064 | { |
1065 | unsigned long cpu = smp_processor_id(); | |
1066 | ||
1067 | __flush_tlb_all(); | |
1068 | if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY) | |
925596a0 | 1069 | voyager_leave_mm(cpu); |
1da177e4 LT |
1070 | } |
1071 | ||
1da177e4 | 1072 | /* flush the TLB of every active CPU in the system */ |
a4ec1eff | 1073 | void flush_tlb_all(void) |
1da177e4 | 1074 | { |
15c8b6c1 | 1075 | on_each_cpu(do_flush_tlb_all, 0, 1); |
1da177e4 LT |
1076 | } |
1077 | ||
1da177e4 | 1078 | /* send a reschedule CPI to one CPU by physical CPU number */
a4ec1eff | 1079 | static void voyager_smp_send_reschedule(int cpu) |
1da177e4 LT |
1080 | { |
1081 | send_one_CPI(cpu, VIC_RESCHEDULE_CPI); | |
1082 | } | |
1083 | ||
a4ec1eff | 1084 | int hard_smp_processor_id(void) |
1da177e4 LT |
1085 | { |
1086 | __u8 i; | |
1087 | __u8 cpumask = inb(VIC_PROC_WHO_AM_I); | |
a4ec1eff | 1088 | if ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER) |
1da177e4 LT |
1089 | return cpumask & 0x1F; |
1090 | ||
a4ec1eff IM |
1091 | for (i = 0; i < 8; i++) { |
1092 | if (cpumask & (1 << i)) | |
1da177e4 LT |
1093 | return i; |
1094 | } | |
1095 | printk("** WARNING ** Illegal cpuid returned by VIC: %d", cpumask); | |
1096 | return 0; | |
1097 | } | |
1098 | ||
a4ec1eff | 1099 | int safe_smp_processor_id(void) |
2654c08c FV |
1100 | { |
1101 | return hard_smp_processor_id(); | |
1102 | } | |
1103 | ||
1da177e4 | 1104 | /* broadcast a halt to all other CPUs */ |
a4ec1eff | 1105 | static void voyager_smp_send_stop(void) |
1da177e4 | 1106 | { |
8691e5a8 | 1107 | smp_call_function(smp_stop_cpu_function, NULL, 1); |
1da177e4 LT |
1108 | } |
1109 | ||
1110 | /* this function is triggered in time.c when a clock tick fires;
1111 | * we need to re-broadcast the tick to all CPUs */ | |
a4ec1eff | 1112 | void smp_vic_timer_interrupt(void) |
1da177e4 LT |
1113 | { |
1114 | send_CPI_allbutself(VIC_TIMER_CPI); | |
7d12e780 | 1115 | smp_local_timer_interrupt(); |
1da177e4 LT |
1116 | } |
1117 | ||
1da177e4 LT |
1118 | /* local (per CPU) timer interrupt. It does both profiling and |
1119 | * process statistics/rescheduling. | |
1120 | * | |
1121 | * We do profiling in every local tick; statistics/rescheduling
1122 | * happen only every 'profiling multiplier' ticks. The default | |
1123 | * multiplier is 1 and it can be changed by writing the new multiplier | |
1124 | * value into /proc/profile. | |
1125 | */ | |
a4ec1eff | 1126 | void smp_local_timer_interrupt(void) |
1da177e4 LT |
1127 | { |
1128 | int cpu = smp_processor_id(); | |
1129 | long weight; | |
1130 | ||
7d12e780 | 1131 | profile_tick(CPU_PROFILING); |
1da177e4 LT |
1132 | if (--per_cpu(prof_counter, cpu) <= 0) { |
1133 | /* | |
1134 | * The multiplier may have changed since the last time we got | |
1135 | * to this point as a result of the user writing to | |
1136 | * /proc/profile. In this case we need to adjust the APIC | |
1137 | * timer accordingly. | |
1138 | * | |
1139 | * Interrupts are already masked off at this point. | |
1140 | */ | |
a4ec1eff | 1141 | per_cpu(prof_counter, cpu) = per_cpu(prof_multiplier, cpu); |
1da177e4 | 1142 | if (per_cpu(prof_counter, cpu) != |
a4ec1eff | 1143 | per_cpu(prof_old_multiplier, cpu)) { |
1da177e4 LT |
1144 | /* FIXME: need to update the vic timer tick here */ |
1145 | per_cpu(prof_old_multiplier, cpu) = | |
a4ec1eff | 1146 | per_cpu(prof_counter, cpu); |
1da177e4 LT |
1147 | } |
1148 | ||
81c06b10 | 1149 | update_process_times(user_mode_vm(get_irq_regs())); |
1da177e4 LT |
1150 | } |
1151 | ||
a4ec1eff | 1152 | if (((1 << cpu) & voyager_extended_vic_processors) == 0) |
1da177e4 LT |
1153 | /* only extended VIC processors participate in |
1154 | * interrupt distribution */ | |
1155 | return; | |
1156 | ||
1157 | /* | |
1158 | * We take the 'long' return path, and there every subsystem | |
27b46d76 | 1159 | * grabs the appropriate locks (kernel lock/irq lock).
1da177e4 LT |
1160 | * |
1161 | * we might want to decouple profiling from the 'long path', | |
1162 | * and do the profiling totally in assembly. | |
1163 | * | |
1164 | * Currently this isn't too much of an issue (performance wise), | |
1165 | * we can take more than 100K local irqs per second on a 100 MHz P5. | |
1166 | */ | |
1167 | ||
a4ec1eff | 1168 | if ((++vic_tick[cpu] & 0x7) != 0) |
1da177e4 LT |
1169 | return; |
1170 | /* get here every 16 ticks (about every 1/6 of a second) */ | |
1171 | ||
1172 | /* Change our priority to give someone else a chance at getting | |
a4ec1eff | 1173 | * the IRQ. The algorithm goes like this: |
1da177e4 LT |
1174 | * |
1175 | * In the VIC, the dynamically routed interrupt is always | |
1176 | * handled by the lowest priority eligible (i.e. receiving | |
1177 | * interrupts) CPU. If >1 eligible CPUs are equal lowest, the | |
1178 | * lowest processor number gets it. | |
1179 | * | |
1180 | * The priority of a CPU is controlled by a special per-CPU | |
1181 | * VIC priority register which is 3 bits wide, 0 being lowest
1182 | * and 7 highest priority.
1183 | * | |
1184 | * Therefore we subtract the average number of interrupts from | |
1185 | * the number we've fielded. If this number is negative, we | |
1186 | * lower the activity count and if it is positive, we raise | |
1187 | * it. | |
1188 | * | |
1189 | * I'm afraid this still leads to odd-looking interrupt counts:
1190 | * the totals are all roughly equal, but the individual ones | |
1191 | * look rather skewed. | |
1192 | * | |
1193 | * FIXME: This algorithm is total crap when mixed with SMP | |
1194 | * affinity code since we now try to even up the interrupt | |
1195 | * counts when an affinity binding is keeping them on a | |
1196 | * particular CPU*/ | |
a4ec1eff | 1197 | weight = (vic_intr_count[cpu] * voyager_extended_cpus |
1da177e4 LT |
1198 | - vic_intr_total) >> 4; |
1199 | weight += 4; | |
a4ec1eff | 1200 | if (weight > 7) |
1da177e4 | 1201 | weight = 7; |
a4ec1eff | 1202 | if (weight < 0) |
1da177e4 | 1203 | weight = 0; |
a4ec1eff IM |
1204 | |
1205 | outb((__u8) weight, VIC_PRIORITY_REGISTER); | |
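	/* illustrative numbers: with two extended CPUs, a CPU that
	 * fielded 1000 of 2400 total interrupts computes
	 * ((1000 * 2 - 2400) >> 4) + 4 = -21, clamped to priority 0,
	 * so the VIC routes it more interrupts; the busier CPU
	 * computes ((1400 * 2 - 2400) >> 4) + 4 = 29, clamped to 7 */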
1da177e4 LT |
1206 | |
1207 | #ifdef VOYAGER_DEBUG | |
a4ec1eff | 1208 | if ((vic_tick[cpu] & 0xFFF) == 0) { |
1da177e4 LT |
1209 | /* print this message roughly every 25 secs */ |
1210 | printk("VOYAGER SMP: vic_tick[%d] = %lu, weight = %ld\n", | |
1211 | cpu, vic_tick[cpu], weight); | |
1212 | } | |
1213 | #endif | |
1214 | } | |
1215 | ||
1216 | /* setup the profiling timer */ | |
a4ec1eff | 1217 | int setup_profiling_timer(unsigned int multiplier) |
1da177e4 LT |
1218 | { |
1219 | int i; | |
1220 | ||
a4ec1eff | 1221 | if (!multiplier)
1da177e4 LT |
1222 | return -EINVAL; |
1223 | ||
a4ec1eff | 1224 | /* |
1da177e4 LT |
1225 | * Set the new multiplier for each CPU. CPUs don't start using the |
1226 | * new values until the next timer interrupt in which they do process | |
1227 | * accounting. | |
1228 | */ | |
9628937d | 1229 | for (i = 0; i < nr_cpu_ids; ++i) |
1da177e4 LT |
1230 | per_cpu(prof_multiplier, i) = multiplier; |
1231 | ||
1232 | return 0; | |
1233 | } | |
1234 | ||
c771746e JB |
1235 | /* This is a bit of a mess, but forced on us by the genirq changes;
1236 | * there's no genirq handler that really does what voyager wants,
1237 | * so hack it up with the simple IRQ handler */
75604d7f | 1238 | static void handle_vic_irq(unsigned int irq, struct irq_desc *desc) |
c771746e JB |
1239 | { |
1240 | before_handle_vic_irq(irq); | |
1241 | handle_simple_irq(irq, desc); | |
1242 | after_handle_vic_irq(irq); | |
1243 | } | |
1244 | ||
1da177e4 LT |
1245 | /* The CPIs are handled in the per cpu 8259s, so they must be |
1246 | * enabled to be received: FIX: enabling the CPIs in the early | |
1247 | * boot sequence interferes with bug checking; enable them later | |
1248 | * on in smp_init */ | |
1249 | #define VIC_SET_GATE(cpi, vector) \ | |
1250 | set_intr_gate((cpi) + VIC_DEFAULT_CPI_BASE, (vector)) | |
1251 | #define QIC_SET_GATE(cpi, vector) \ | |
1252 | set_intr_gate((cpi) + QIC_DEFAULT_CPI_BASE, (vector)) | |
1253 | ||
73557af5 | 1254 | void __init voyager_smp_intr_init(void) |
1da177e4 LT |
1255 | { |
1256 | int i; | |
1257 | ||
1258 | /* initialize the per cpu irq mask to all disabled */ | |
9628937d | 1259 | for (i = 0; i < nr_cpu_ids; i++) |
1da177e4 LT |
1260 | vic_irq_mask[i] = 0xFFFF; |
1261 | ||
1262 | VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt); | |
1263 | ||
1264 | VIC_SET_GATE(VIC_SYS_INT, vic_sys_interrupt); | |
1265 | VIC_SET_GATE(VIC_CMN_INT, vic_cmn_interrupt); | |
1266 | ||
1267 | QIC_SET_GATE(QIC_TIMER_CPI, qic_timer_interrupt); | |
1268 | QIC_SET_GATE(QIC_INVALIDATE_CPI, qic_invalidate_interrupt); | |
1269 | QIC_SET_GATE(QIC_RESCHEDULE_CPI, qic_reschedule_interrupt); | |
1270 | QIC_SET_GATE(QIC_ENABLE_IRQ_CPI, qic_enable_irq_interrupt); | |
1271 | QIC_SET_GATE(QIC_CALL_FUNCTION_CPI, qic_call_function_interrupt); | |
1da177e4 | 1272 | |
a4ec1eff | 1273 | /* now put the VIC descriptor into the first 48 IRQs |
1da177e4 LT |
1274 | * |
1275 | * This is for later: first 16 correspond to PC IRQs; next 16 | |
1276 | * are Primary MC IRQs and final 16 are Secondary MC IRQs */ | |
a4ec1eff | 1277 | for (i = 0; i < 48; i++) |
c771746e | 1278 | set_irq_chip_and_handler(i, &vic_chip, handle_vic_irq); |
1da177e4 LT |
1279 | } |
1280 | ||
1281 | /* send a CPI at level cpi to a set of cpus in cpuset (set 1 bit per | |
1282 | * processor to receive CPI) */
a4ec1eff | 1283 | static void send_CPI(__u32 cpuset, __u8 cpi) |
1da177e4 LT |
1284 | { |
1285 | int cpu; | |
1286 | __u32 quad_cpuset = (cpuset & voyager_quad_processors); | |
1287 | ||
a4ec1eff IM |
1288 | if (cpi < VIC_START_FAKE_CPI) { |
1289 | /* fake CPIs are only used for booting, so send to the
1da177e4 | 1290 | * extended quads as well---Quads must be VIC booted */ |
a4ec1eff | 1291 | outb((__u8) (cpuset), VIC_CPI_Registers[cpi]); |
1da177e4 LT |
1292 | return; |
1293 | } | |
a4ec1eff | 1294 | if (quad_cpuset) |
1da177e4 LT |
1295 | send_QIC_CPI(quad_cpuset, cpi); |
1296 | cpuset &= ~quad_cpuset; | |
1297 | cpuset &= 0xff; /* only first 8 CPUs valid for VIC CPI */
a4ec1eff | 1298 | if (cpuset == 0) |
1da177e4 LT |
1299 | return; |
1300 | for_each_online_cpu(cpu) { | |
a4ec1eff | 1301 | if (cpuset & (1 << cpu)) |
1da177e4 LT |
1302 | set_bit(cpi, &vic_cpi_mailbox[cpu]); |
1303 | } | |
a4ec1eff IM |
1304 | if (cpuset) |
1305 | outb((__u8) cpuset, VIC_CPI_Registers[VIC_CPI_LEVEL0]); | |
1da177e4 LT |
1306 | } |
1307 | ||
1308 | /* Acknowledge receipt of CPI in the QIC, clear in QIC hardware and | |
1309 | * set the cache line to shared by reading it. | |
1310 | * | |
1311 | * DON'T make this inline, otherwise the cache line read will be
1312 | * optimised away | |
1313 | * */ | |
a4ec1eff IM |
1314 | static int ack_QIC_CPI(__u8 cpi) |
1315 | { | |
1da177e4 LT |
1316 | __u8 cpu = hard_smp_processor_id(); |
1317 | ||
1318 | cpi &= 7; | |
1319 | ||
a4ec1eff | 1320 | outb(1 << cpi, QIC_INTERRUPT_CLEAR1); |
1da177e4 LT |
1321 | return voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi; |
1322 | } | |
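/* the read in the return statement above is what performs the
 * cache line transition described in the comment; kept out of line,
 * the compiler cannot prove the load dead and must issue it */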
1323 | ||
a4ec1eff | 1324 | static void ack_special_QIC_CPI(__u8 cpi) |
1da177e4 | 1325 | { |
a4ec1eff | 1326 | switch (cpi) { |
1da177e4 LT |
1327 | case VIC_CMN_INT: |
1328 | outb(QIC_CMN_INT, QIC_INTERRUPT_CLEAR0); | |
1329 | break; | |
1330 | case VIC_SYS_INT: | |
1331 | outb(QIC_SYS_INT, QIC_INTERRUPT_CLEAR0); | |
1332 | break; | |
1333 | } | |
1334 | /* also clear at the VIC, just in case (nop for non-extended proc) */ | |
1335 | ack_VIC_CPI(cpi); | |
1336 | } | |
1337 | ||
1338 | /* Acknowledge receipt of CPI in the VIC (essentially an EOI) */ | |
a4ec1eff | 1339 | static void ack_VIC_CPI(__u8 cpi) |
1da177e4 LT |
1340 | { |
1341 | #ifdef VOYAGER_DEBUG | |
1342 | unsigned long flags; | |
1343 | __u16 isr; | |
1344 | __u8 cpu = smp_processor_id(); | |
1345 | ||
1346 | local_irq_save(flags); | |
1347 | isr = vic_read_isr(); | |
a4ec1eff | 1348 | if ((isr & (1 << (cpi & 7))) == 0) { |
1da177e4 LT |
1349 | printk("VOYAGER SMP: CPU%d lost CPI%d\n", cpu, cpi); |
1350 | } | |
1351 | #endif | |
1352 | /* send specific EOI; the two system interrupts have | |
1353 | * bit 4 set for a separate vector but behave as the | |
1354 | * corresponding 3 bit intr */ | |
a4ec1eff | 1355 | outb_p(0x60 | (cpi & 7), 0x20); |
1da177e4 LT |
1356 | |
1357 | #ifdef VOYAGER_DEBUG | |
a4ec1eff | 1358 | if ((vic_read_isr() & (1 << (cpi & 7))) != 0) { |
1da177e4 LT |
1359 | printk("VOYAGER SMP: CPU%d still asserting CPI%d\n", cpu, cpi); |
1360 | } | |
1361 | local_irq_restore(flags); | |
1362 | #endif | |
1363 | } | |
1364 | ||
1365 | /* cribbed with thanks from irq.c */ | |
a4ec1eff | 1366 | #define __byte(x,y) (((unsigned char *)&(y))[x]) |
1da177e4 LT |
1367 | #define cached_21(cpu) (__byte(0,vic_irq_mask[cpu])) |
1368 | #define cached_A1(cpu) (__byte(1,vic_irq_mask[cpu])) | |
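/* on little-endian x86, __byte(0, y) is the low byte of y and
 * __byte(1, y) the high byte, so cached_21() mirrors the master 8259
 * mask (port 0x21) and cached_A1() the slave mask (port 0xA1); e.g.
 * with vic_irq_mask[cpu] == 0xFFFE (everything masked except irq 0):
 *
 *	outb_p(cached_21(cpu), 0x21);	writes 0xFE to the master
 *	outb_p(cached_A1(cpu), 0xA1);	writes 0xFF to the slave
 */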
1369 | ||
a4ec1eff | 1370 | static unsigned int startup_vic_irq(unsigned int irq) |
1da177e4 | 1371 | { |
c771746e | 1372 | unmask_vic_irq(irq); |
1da177e4 LT |
1373 | |
1374 | return 0; | |
1375 | } | |
1376 | ||
1377 | /* The enable and disable routines. This is where we run into | |
1378 | * conflicting architectural philosophy. Fundamentally, the voyager | |
1379 | * architecture does not expect to have to disable interrupts globally | |
1380 | * (the IRQ controllers belong to each CPU). The processor masquerade | |
1381 | * which is used to start the system shouldn't be used in a running OS | |
1382 | * since it will cause great confusion if two separate CPUs drive to | |
1383 | * the same IRQ controller (I know, I've tried it). | |
1384 | * | |
1385 | * The solution is a variant on the NCR lazy SPL design: | |
1386 | * | |
1387 | * 1) To disable an interrupt, do nothing (other than set the | |
1388 | * IRQ_DISABLED flag). This dares the interrupt actually to arrive. | |
1389 | * | |
1390 | * 2) If the interrupt dares to come in, raise the local mask against | |
1391 | * it (this will result in all the CPU masks being raised | |
1392 | * eventually). | |
1393 | * | |
1394 | * 3) To enable the interrupt, lower the mask on the local CPU and | |
1395 | * broadcast an Interrupt enable CPI which causes all other CPUs to | |
1396 | * adjust their masks accordingly. */ | |
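/* so the expected lifecycle, roughly, is:
 *
 *	mask_vic_irq(irq);		nothing happens yet
 *	...the interrupt fires anyway...
 *	before_handle_vic_irq(irq);	sees IRQ_DISABLED, masks and
 *					acks on this CPU only
 *	unmask_vic_irq(irq);		lowers the local mask and
 *					broadcasts VIC_ENABLE_IRQ_CPI
 */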
1397 | ||
a4ec1eff | 1398 | static void unmask_vic_irq(unsigned int irq) |
1da177e4 LT |
1399 | { |
1400 | /* linux doesn't do processor-irq affinity, so enable on
1401 | * all CPUs we know about */ | |
1402 | int cpu = smp_processor_id(), real_cpu; | |
a4ec1eff | 1403 | __u16 mask = (1 << irq); |
1da177e4 LT |
1404 | __u32 processorList = 0; |
1405 | unsigned long flags; | |
1406 | ||
c771746e | 1407 | VDEBUG(("VOYAGER: unmask_vic_irq(%d) CPU%d affinity 0x%lx\n", |
1da177e4 LT |
1408 | irq, cpu, cpu_irq_affinity[cpu])); |
1409 | spin_lock_irqsave(&vic_irq_lock, flags); | |
1410 | for_each_online_cpu(real_cpu) { | |
a4ec1eff | 1411 | if (!(voyager_extended_vic_processors & (1 << real_cpu))) |
1da177e4 | 1412 | continue; |
a4ec1eff | 1413 | if (!(cpu_irq_affinity[real_cpu] & mask)) { |
1da177e4 LT |
1414 | /* irq has no affinity for this CPU, ignore */ |
1415 | continue; | |
1416 | } | |
a4ec1eff | 1417 | if (real_cpu == cpu) { |
1da177e4 | 1418 | enable_local_vic_irq(irq); |
a4ec1eff | 1419 | } else if (vic_irq_mask[real_cpu] & mask) { |
1da177e4 | 1420 | vic_irq_enable_mask[real_cpu] |= mask; |
a4ec1eff | 1421 | processorList |= (1 << real_cpu); |
1da177e4 LT |
1422 | } |
1423 | } | |
1424 | spin_unlock_irqrestore(&vic_irq_lock, flags); | |
a4ec1eff | 1425 | if (processorList) |
1da177e4 LT |
1426 | send_CPI(processorList, VIC_ENABLE_IRQ_CPI); |
1427 | } | |
1428 | ||
static void mask_vic_irq(unsigned int irq)
{
	/* lazy disable, do nothing */
}

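/* Note on the dummy inb_p() read-backs in the two routines below:
 * after the new mask is written, the port is read straight back,
 * presumably to make sure the write has reached the (slow) 8259
 * before execution continues. */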
static void enable_local_vic_irq(unsigned int irq)
{
	__u8 cpu = smp_processor_id();
	__u16 mask = ~(1 << irq);
	__u16 old_mask = vic_irq_mask[cpu];

	vic_irq_mask[cpu] &= mask;
	if (vic_irq_mask[cpu] == old_mask)
		return;

	VDEBUG(("VOYAGER DEBUG: Enabling irq %d in hardware on CPU %d\n",
		irq, cpu));

	if (irq & 8) {
		outb_p(cached_A1(cpu), 0xA1);
		(void)inb_p(0xA1);
	} else {
		outb_p(cached_21(cpu), 0x21);
		(void)inb_p(0x21);
	}
}

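/* IRQ 7 is never masked locally (see the early return below),
 * apparently because the system and common interrupts arrive through
 * it; vic_enable_cpi() unmasks it explicitly on every CPU. */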
static void disable_local_vic_irq(unsigned int irq)
{
	__u8 cpu = smp_processor_id();
	__u16 mask = (1 << irq);
	__u16 old_mask = vic_irq_mask[cpu];

	if (irq == 7)
		return;

	vic_irq_mask[cpu] |= mask;
	if (old_mask == vic_irq_mask[cpu])
		return;

	VDEBUG(("VOYAGER DEBUG: Disabling irq %d in hardware on CPU %d\n",
		irq, cpu));

	if (irq & 8) {
		outb_p(cached_A1(cpu), 0xA1);
		(void)inb_p(0xA1);
	} else {
		outb_p(cached_21(cpu), 0x21);
		(void)inb_p(0x21);
	}
}

/* The VIC is level triggered, so the ack can only be issued after the
 * interrupt completes.  However, we do Voyager lazy interrupt
 * handling here: It is an extremely expensive operation to mask an
 * interrupt in the vic, so we merely set a flag (IRQ_DISABLED).  If
 * this interrupt actually comes in, then we mask and ack here to push
 * the interrupt off to another CPU */
static void before_handle_vic_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_to_desc(irq);
	__u8 cpu = smp_processor_id();

	_raw_spin_lock(&vic_irq_lock);
	vic_intr_total++;
	vic_intr_count[cpu]++;

	if (!(cpu_irq_affinity[cpu] & (1 << irq))) {
		/* The irq is not in our affinity mask, push it off
		 * onto another CPU */
		VDEBUG(("VOYAGER DEBUG: affinity triggered disable of irq %d "
			"on cpu %d\n", irq, cpu));
		disable_local_vic_irq(irq);
		/* set IRQ_INPROGRESS to prevent the handler in irq.c from
		 * actually calling the interrupt routine */
		desc->status |= IRQ_REPLAY | IRQ_INPROGRESS;
	} else if (desc->status & IRQ_DISABLED) {
		/* Damn, the interrupt actually arrived, do the lazy
		 * disable thing.  The interrupt routine in irq.c will
		 * not handle an IRQ_DISABLED interrupt, so nothing more
		 * need be done here */
		VDEBUG(("VOYAGER DEBUG: lazy disable of irq %d on CPU %d\n",
			irq, cpu));
		disable_local_vic_irq(irq);
		desc->status |= IRQ_REPLAY;
	} else {
		desc->status &= ~IRQ_REPLAY;
	}

	_raw_spin_unlock(&vic_irq_lock);
}

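/* The debug scan in the routine below temporarily masquerades as each
 * possible CPU (via VIC_PROCESSOR_ID) to inspect that CPU's local
 * 8259 ISR; vic_irq_lock is held for the whole scan, presumably so
 * the masquerade cannot race with other users of the register. */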
/* Finish the VIC interrupt: basically mask */
static void after_handle_vic_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_to_desc(irq);

	_raw_spin_lock(&vic_irq_lock);
	{
		unsigned int status = desc->status & ~IRQ_INPROGRESS;
#ifdef VOYAGER_DEBUG
		__u16 isr;
#endif

		desc->status = status;
		if ((status & IRQ_DISABLED))
			disable_local_vic_irq(irq);
#ifdef VOYAGER_DEBUG
		/* DEBUG: before we ack, check what's in progress */
		isr = vic_read_isr();
		if (!(isr & (1 << irq)) && !(status & IRQ_REPLAY)) {
			__u8 cpu = smp_processor_id();
			__u8 real_cpu;

			printk("VOYAGER SMP: CPU%d lost interrupt %d\n",
			       cpu, irq);
			for_each_possible_cpu(real_cpu) {

				outb(VIC_CPU_MASQUERADE_ENABLE | real_cpu,
				     VIC_PROCESSOR_ID);
				isr = vic_read_isr();
				if (isr & (1 << irq)) {
					printk
					    ("VOYAGER SMP: CPU%d ack irq %d\n",
					     real_cpu, irq);
					ack_vic_irq(irq);
				}
				outb(cpu, VIC_PROCESSOR_ID);
			}
		}
#endif /* VOYAGER_DEBUG */
		/* as soon as we ack, the interrupt is eligible for
		 * receipt by another CPU so everything must be in
		 * order here */
		ack_vic_irq(irq);
		if (status & IRQ_REPLAY) {
			/* replay is set if we disable the interrupt
			 * in the before_handle_vic_irq() routine, so
			 * clear the in progress bit here to allow the
			 * next CPU to handle this correctly */
			desc->status &= ~(IRQ_REPLAY | IRQ_INPROGRESS);
		}
#ifdef VOYAGER_DEBUG
		isr = vic_read_isr();
		if ((isr & (1 << irq)) != 0)
			printk("VOYAGER SMP: after_handle_vic_irq() after "
			       "ack irq=%d, isr=0x%x\n", irq, isr);
#endif /* VOYAGER_DEBUG */
	}
	_raw_spin_unlock(&vic_irq_lock);

	/* All code after this point is out of the main path - the IRQ
	 * may be intercepted by another CPU if reasserted */
}

/* Linux processor - interrupt affinity manipulations.
 *
 * For each processor, we maintain a 32 bit irq affinity mask.
 * Initially it is set to all 1's so every processor accepts every
 * interrupt.  In this call, we change the processor's affinity mask:
 *
 * Change from enable to disable:
 *
 * If the interrupt ever comes in to the processor, we will disable it
 * and ack it to push it off to another CPU, so just accept the mask here.
 *
 * Change from disable to enable:
 *
 * change the mask and then do an interrupt enable CPI to re-enable on
 * the selected processors */

void set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
{
	/* Only extended processors handle interrupts */
	unsigned long real_mask;
	unsigned long irq_mask = 1 << irq;
	int cpu;

	real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors;

	if (cpus_addr(mask)[0] == 0)
		/* there must be at least one CPU to accept the
		 * interrupt -- extremely bad things will happen
		 * otherwise */
		return;

	if (irq == 0)
		/* can't change the affinity of the timer IRQ.  This
		 * is due to the constraint in the voyager
		 * architecture that the CPI also comes in on an IRQ
		 * line and we have chosen IRQ0 for this.  If you
		 * raise the mask on this interrupt, the processor
		 * will no longer be able to accept VIC CPIs */
		return;

	if (irq >= 32)
		/* You can only have 32 interrupts in a voyager system
		 * (and 32 only if you have a secondary microchannel
		 * bus) */
		return;

	for_each_online_cpu(cpu) {
		unsigned long cpu_mask = 1 << cpu;

		if (cpu_mask & real_mask) {
			/* enable the interrupt for this cpu */
			cpu_irq_affinity[cpu] |= irq_mask;
		} else {
			/* disable the interrupt for this cpu */
			cpu_irq_affinity[cpu] &= ~irq_mask;
		}
	}
	/* this is magic, we now have the correct affinity maps, so
	 * enable the interrupt.  This will send an enable CPI to
	 * those CPUs that need to enable it in their local masks,
	 * causing them to correct for the new affinity.  If the
	 * interrupt is currently globally disabled, it will simply be
	 * disabled again as it comes in (voyager lazy disable).  If
	 * the affinity map is tightened to disable the interrupt on a
	 * cpu, it will be pushed off when it comes in */
	unmask_vic_irq(irq);
}

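/* 0x60 is the 8259 specific-EOI command, OR'd with the in-service
 * level.  IRQs 8-15 get the specific EOI on the slave (port 0xA0)
 * plus a specific EOI for the cascade level 2 on the master, hence
 * the 0x62 write to port 0x20 below. */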
static void ack_vic_irq(unsigned int irq)
{
	if (irq & 8) {
		outb(0x62, 0x20);	/* Specific EOI to cascade */
		outb(0x60 | (irq & 7), 0xA0);
	} else {
		outb(0x60 | (irq & 7), 0x20);
	}
}

/* enable the CPIs.  In the VIC, the CPIs are delivered by the 8259
 * but are not vectored by it.  This means that the 8259 mask must be
 * lowered to receive them */
static __init void vic_enable_cpi(void)
{
	__u8 cpu = smp_processor_id();

	/* just take a copy of the current mask (nop for boot cpu) */
	vic_irq_mask[cpu] = vic_irq_mask[boot_cpu_id];

	enable_local_vic_irq(VIC_CPI_LEVEL0);
	enable_local_vic_irq(VIC_CPI_LEVEL1);
	/* for sys int and cmn int */
	enable_local_vic_irq(7);

	if (is_cpu_quad()) {
		outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0);
		outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1);
		VDEBUG(("VOYAGER SMP: QIC ENABLE CPI: CPU%d: MASK 0x%x\n",
			cpu, QIC_CPI_ENABLE));
	}

	VDEBUG(("VOYAGER SMP: ENABLE CPI: CPU%d: MASK 0x%x\n",
		cpu, vic_irq_mask[cpu]));
}

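/* The dump below masquerades as each CPU in turn and reads its local
 * 8259 pair directly: the mask registers (0x21/0xa1) give the IMR,
 * while the OCW3 commands 0x0a and 0x0b select the IRR and ISR
 * respectively for the following read. */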
void voyager_smp_dump(void)
{
	int old_cpu = smp_processor_id(), cpu;

	/* dump the interrupt masks of each processor */
	for_each_online_cpu(cpu) {
		__u16 imr, isr, irr;
		unsigned long flags;

		local_irq_save(flags);
		outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID);
		imr = (inb(0xa1) << 8) | inb(0x21);
		outb(0x0a, 0xa0);
		irr = inb(0xa0) << 8;
		outb(0x0a, 0x20);
		irr |= inb(0x20);
		outb(0x0b, 0xa0);
		isr = inb(0xa0) << 8;
		outb(0x0b, 0x20);
		isr |= inb(0x20);
		outb(old_cpu, VIC_PROCESSOR_ID);
		local_irq_restore(flags);
		printk("\tCPU%d: mask=0x%x, IMR=0x%x, IRR=0x%x, ISR=0x%x\n",
		       cpu, vic_irq_mask[cpu], imr, irr, isr);
#if 0
		/* These lines are put in to try to unstick an un-ack'd irq */
		if (isr != 0) {
			int irq;
			for (irq = 0; irq < 16; irq++) {
				if (isr & (1 << irq)) {
					printk("\tCPU%d: ack irq %d\n",
					       cpu, irq);
					local_irq_save(flags);
					outb(VIC_CPU_MASQUERADE_ENABLE | cpu,
					     VIC_PROCESSOR_ID);
					ack_vic_irq(irq);
					outb(old_cpu, VIC_PROCESSOR_ID);
					local_irq_restore(flags);
				}
			}
		}
#endif
	}
}

void smp_voyager_power_off(void *dummy)
{
	if (smp_processor_id() == boot_cpu_id)
		voyager_power_off();
	else
		smp_stop_cpu_function(NULL);
}

static void __init voyager_smp_prepare_cpus(unsigned int max_cpus)
{
	/* FIXME: ignore max_cpus for now */
	smp_boot_cpus();
}

static void __cpuinit voyager_smp_prepare_boot_cpu(void)
{
	init_gdt(smp_processor_id());
	switch_to_new_gdt();

	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), cpu_callout_map);
	cpu_set(smp_processor_id(), cpu_possible_map);
	cpu_set(smp_processor_id(), cpu_present_map);
}

static int __cpuinit voyager_cpu_up(unsigned int cpu)
{
	/* This only works at boot for x86.  See "rewrite" above. */
	if (cpu_isset(cpu, smp_commenced_mask))
		return -ENOSYS;

	/* In case one didn't come up */
	if (!cpu_isset(cpu, cpu_callin_map))
		return -EIO;
	/* Unleash the CPU! */
	cpu_set(cpu, smp_commenced_mask);
	while (!cpu_online(cpu))
		mb();
	return 0;
}

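/* Once every CPU is up, the low identity mappings that the secondary
 * processors booted through (see the trampoline code) are no longer
 * needed and can be torn down. */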
static void __init voyager_smp_cpus_done(unsigned int max_cpus)
{
	zap_low_mappings();
}

void __init smp_setup_processor_id(void)
{
	current_thread_info()->cpu = hard_smp_processor_id();
	x86_write_percpu(cpu_number, hard_smp_processor_id());
}

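/* Broadcast a call-function CPI; the sender strips its own bit from
 * the mask so a CPU never sends a call-function CPI to itself. */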
static void voyager_send_call_func(cpumask_t callmask)
{
	__u32 mask = cpus_addr(callmask)[0] & ~(1 << smp_processor_id());
	send_CPI(mask, VIC_CALL_FUNCTION_CPI);
}

static void voyager_send_call_func_single(int cpu)
{
	send_CPI(1 << cpu, VIC_CALL_FUNCTION_SINGLE_CPI);
}

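/* Hook the Voyager implementations into the generic x86 smp_ops
 * vector, so the rest of the kernel reaches them through the usual
 * SMP entry points. */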
struct smp_ops smp_ops = {
	.smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu,
	.smp_prepare_cpus = voyager_smp_prepare_cpus,
	.cpu_up = voyager_cpu_up,
	.smp_cpus_done = voyager_smp_cpus_done,

	.smp_send_stop = voyager_smp_send_stop,
	.smp_send_reschedule = voyager_smp_send_reschedule,

	.send_call_func_ipi = voyager_send_call_func,
	.send_call_func_single_ipi = voyager_send_call_func_single,
};