/* -*- mode: c; c-basic-offset: 8 -*- */

/* Copyright (C) 1999,2001
 *
 * Author: J.E.J.Bottomley@HansenPartnership.com
 *
 * linux/arch/i386/kernel/voyager_smp.c
 *
 * This file provides all the same external entries as smp.c but uses
 * the voyager hal to provide the functionality
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bootmem.h>
#include <linux/completion.h>
#include <asm/desc.h>
#include <asm/voyager.h>
#include <asm/vic.h>
#include <asm/mtrr.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/arch_hooks.h>

/* TLB state -- visible externally, indexed physically */
DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { &init_mm, 0 };

/* CPU IRQ affinity -- set to all ones initially */
static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned =
        {[0 ... NR_CPUS-1] = ~0UL };

/* per CPU data structure (for /proc/cpuinfo et al), visible externally
 * indexed physically */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

/* physical ID of the CPU used to boot the system */
unsigned char boot_cpu_id;

/* The memory line addresses for the Quad CPIs */
struct voyager_qic_cpi *voyager_quad_cpi_addr[NR_CPUS] __cacheline_aligned;

/* The masks for the Extended VIC processors, filled in by cat_init */
__u32 voyager_extended_vic_processors = 0;

/* Masks for the extended Quad processors which cannot be VIC booted */
__u32 voyager_allowed_boot_processors = 0;

/* The mask for the Quad Processors (both extended and non-extended) */
__u32 voyager_quad_processors = 0;

/* Total count of live CPUs, used in process.c to display
 * the CPU information and in irq.c for the per CPU irq
 * activity count. Finally exported by i386_ksyms.c */
static int voyager_extended_cpus = 1;

/* Have we found an SMP box - used by time.c to do the profiling
   interrupt for timeslicing; do not set to 1 until the per CPU timer
   interrupt is active */
int smp_found_config = 0;

/* Used for the invalidate map that's also checked in the spinlock */
static volatile unsigned long smp_invalidate_needed;

/* Bitmask of currently online CPUs - used by setup.c for
   /proc/cpuinfo, visible externally but still physical */
cpumask_t cpu_online_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_online_map);

/* Bitmask of CPUs present in the system - exported by i386_syms.c, used
 * by scheduler but indexed physically */
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;

/* The internal functions */
static void send_CPI(__u32 cpuset, __u8 cpi);
static void ack_CPI(__u8 cpi);
static int ack_QIC_CPI(__u8 cpi);
static void ack_special_QIC_CPI(__u8 cpi);
static void ack_VIC_CPI(__u8 cpi);
static void send_CPI_allbutself(__u8 cpi);
static void mask_vic_irq(unsigned int irq);
static void unmask_vic_irq(unsigned int irq);
static unsigned int startup_vic_irq(unsigned int irq);
static void enable_local_vic_irq(unsigned int irq);
static void disable_local_vic_irq(unsigned int irq);
static void before_handle_vic_irq(unsigned int irq);
static void after_handle_vic_irq(unsigned int irq);
static void set_vic_irq_affinity(unsigned int irq, cpumask_t mask);
static void ack_vic_irq(unsigned int irq);
static void vic_enable_cpi(void);
static void do_boot_cpu(__u8 cpuid);
static void do_quad_bootstrap(void);

int hard_smp_processor_id(void);
int safe_smp_processor_id(void);

/* Inline functions */
static inline void send_one_QIC_CPI(__u8 cpu, __u8 cpi)
{
        voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi =
            (smp_processor_id() << 16) + cpi;
}
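
/* Note, for reference: the word written above packs the sending CPU's
 * id into the top 16 bits and the CPI number into the bottom 16; the
 * receiving quad acknowledges the CPI simply by reading the memory
 * line back (see ack_QIC_CPI() below). */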

static inline void send_QIC_CPI(__u32 cpuset, __u8 cpi)
{
        int cpu;

        for_each_online_cpu(cpu) {
                if (cpuset & (1 << cpu)) {
#ifdef VOYAGER_DEBUG
                        if (!cpu_isset(cpu, cpu_online_map))
                                VDEBUG(("CPU%d sending cpi %d to CPU%d not in "
                                        "cpu_online_map\n",
                                        hard_smp_processor_id(), cpi, cpu));
#endif
                        send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET);
                }
        }
}

static inline void wrapper_smp_local_timer_interrupt(void)
{
        irq_enter();
        smp_local_timer_interrupt();
        irq_exit();
}

static inline void send_one_CPI(__u8 cpu, __u8 cpi)
{
        if (voyager_quad_processors & (1 << cpu))
                send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET);
        else
                send_CPI(1 << cpu, cpi);
}

static inline void send_CPI_allbutself(__u8 cpi)
{
        __u8 cpu = smp_processor_id();
        __u32 mask = cpus_addr(cpu_online_map)[0] & ~(1 << cpu);
        send_CPI(mask, cpi);
}

static inline int is_cpu_quad(void)
{
        __u8 cpumask = inb(VIC_PROC_WHO_AM_I);
        return ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER);
}

static inline int is_cpu_extended(void)
{
        __u8 cpu = hard_smp_processor_id();

        return (voyager_extended_vic_processors & (1 << cpu));
}

static inline int is_cpu_vic_boot(void)
{
        __u8 cpu = hard_smp_processor_id();

        return (voyager_extended_vic_processors
                & voyager_allowed_boot_processors & (1 << cpu));
}

static inline void ack_CPI(__u8 cpi)
{
        switch (cpi) {
        case VIC_CPU_BOOT_CPI:
                if (is_cpu_quad() && !is_cpu_vic_boot())
                        ack_QIC_CPI(cpi);
                else
                        ack_VIC_CPI(cpi);
                break;
        case VIC_SYS_INT:
        case VIC_CMN_INT:
                /* These are slightly strange. Even on the Quad card,
                 * they are vectored as VIC CPIs */
                if (is_cpu_quad())
                        ack_special_QIC_CPI(cpi);
                else
                        ack_VIC_CPI(cpi);
                break;
        default:
                printk("VOYAGER ERROR: CPI%d is in common CPI code\n", cpi);
                break;
        }
}

/* local variables */

/* The VIC IRQ descriptors -- these look almost identical to the
 * 8259 IRQs except that masks and things must be kept per processor
 */
static struct irq_chip vic_chip = {
        .name = "VIC",
        .startup = startup_vic_irq,
        .mask = mask_vic_irq,
        .unmask = unmask_vic_irq,
        .set_affinity = set_vic_irq_affinity,
};

/* used to count up as CPUs are brought on line (starts at 0) */
static int cpucount = 0;

/* steal a page from the bottom of memory for the trampoline and
 * squirrel its address away here. This will be in kernel virtual
 * space */
static __u32 trampoline_base;

/* The per cpu profile stuff - used in smp_local_timer_interrupt */
static DEFINE_PER_CPU(int, prof_multiplier) = 1;
static DEFINE_PER_CPU(int, prof_old_multiplier) = 1;
static DEFINE_PER_CPU(int, prof_counter) = 1;

/* the map used to check if a CPU has booted */
static __u32 cpu_booted_map;

/* the synchronize flag used to hold all secondary CPUs spinning in
 * a tight loop until the boot sequence is ready for them */
static cpumask_t smp_commenced_mask = CPU_MASK_NONE;

/* This is for the new dynamic CPU boot code */
cpumask_t cpu_callin_map = CPU_MASK_NONE;
cpumask_t cpu_callout_map = CPU_MASK_NONE;
cpumask_t cpu_possible_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_possible_map);

/* The per processor IRQ masks (these are usually kept in sync) */
static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;

/* the list of IRQs to be enabled by the VIC_ENABLE_IRQ_CPI */
static __u16 vic_irq_enable_mask[NR_CPUS] __cacheline_aligned = { 0 };

/* Lock for enable/disable of VIC interrupts */
static __cacheline_aligned DEFINE_SPINLOCK(vic_irq_lock);

/* The boot processor is correctly set up in PC mode when it
 * comes up, but the secondaries need their master/slave 8259
 * pairs initializing correctly */

/* Interrupt counters (per cpu) and total - used to try to
 * even up the interrupt handling routines */
static long vic_intr_total = 0;
static long vic_intr_count[NR_CPUS] __cacheline_aligned = { 0 };
static unsigned long vic_tick[NR_CPUS] __cacheline_aligned = { 0 };

/* Since we can only use CPI0, we fake all the other CPIs */
static unsigned long vic_cpi_mailbox[NR_CPUS] __cacheline_aligned;

/* debugging routine to read the isr of the cpu's pic */
static inline __u16 vic_read_isr(void)
{
        __u16 isr;

        outb(0x0b, 0xa0);
        isr = inb(0xa0) << 8;
        outb(0x0b, 0x20);
        isr |= inb(0x20);

        return isr;
}
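
/* For reference: 0x0b is the 8259 OCW3 "read ISR" command; writing it
 * to a command port (0x20 master, 0xa0 slave) makes the next read of
 * that port return the in-service register. */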

static __init void qic_setup(void)
{
        if (!is_cpu_quad()) {
                /* not a quad, no setup */
                return;
        }
        outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0);
        outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1);

        if (is_cpu_extended()) {
                /* the QIC duplicate of the VIC base register */
                outb(VIC_DEFAULT_CPI_BASE, QIC_VIC_CPI_BASE_REGISTER);
                outb(QIC_DEFAULT_CPI_BASE, QIC_CPI_BASE_REGISTER);

                /* FIXME: should set up the QIC timer and memory parity
                 * error vectors here */
        }
}

static __init void vic_setup_pic(void)
{
        outb(1, VIC_REDIRECT_REGISTER_1);
        /* clear the claim registers for dynamic routing */
        outb(0, VIC_CLAIM_REGISTER_0);
        outb(0, VIC_CLAIM_REGISTER_1);

        outb(0, VIC_PRIORITY_REGISTER);
        /* Set the Primary and Secondary Microchannel vector
         * bases to be the same as the ordinary interrupts
         *
         * FIXME: This would be more efficient using separate
         * vectors. */
        outb(FIRST_EXTERNAL_VECTOR, VIC_PRIMARY_MC_BASE);
        outb(FIRST_EXTERNAL_VECTOR, VIC_SECONDARY_MC_BASE);
        /* Now initialise the master PIC belonging to this CPU by
         * sending the four ICWs */

        /* ICW1: level triggered, ICW4 needed */
        outb(0x19, 0x20);

        /* ICW2: vector base */
        outb(FIRST_EXTERNAL_VECTOR, 0x21);

        /* ICW3: slave at line 2 */
        outb(0x04, 0x21);

        /* ICW4: 8086 mode */
        outb(0x01, 0x21);

        /* now the same for the slave PIC */

        /* ICW1: level trigger, ICW4 needed */
        outb(0x19, 0xA0);

        /* ICW2: slave vector base */
        outb(FIRST_EXTERNAL_VECTOR + 8, 0xA1);

        /* ICW3: slave ID */
        outb(0x02, 0xA1);

        /* ICW4: 8086 mode */
        outb(0x01, 0xA1);
}
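
/* Decoding the magic numbers above, for reference: ICW1 0x19 is
 * binary 00011001, i.e. bit 4 (ICW1 marker) | bit 3 (LTIM, level
 * triggered) | bit 0 (IC4, an ICW4 follows); the master's ICW3 0x04
 * has bit 2 set, placing the slave on cascade line 2. */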

static void do_quad_bootstrap(void)
{
        if (is_cpu_quad() && is_cpu_vic_boot()) {
                int i;
                unsigned long flags;
                __u8 cpuid = hard_smp_processor_id();

                local_irq_save(flags);

                for (i = 0; i < 4; i++) {
                        /* FIXME: this would be >>3 &0x7 on the 32 way */
                        if (((cpuid >> 2) & 0x03) == i)
                                /* don't lower our own mask! */
                                continue;

                        /* masquerade as local Quad CPU */
                        outb(QIC_CPUID_ENABLE | i, QIC_PROCESSOR_ID);
                        /* enable the startup CPI */
                        outb(QIC_BOOT_CPI_MASK, QIC_MASK_REGISTER1);
                        /* restore cpu id */
                        outb(0, QIC_PROCESSOR_ID);
                }
                local_irq_restore(flags);
        }
}

/* Set up all the basic stuff: read the SMP config and make all the
 * SMP information reflect only the boot cpu. All others will be
 * brought on-line later. */
void __init find_smp_config(void)
{
        int i;

        boot_cpu_id = hard_smp_processor_id();

        printk("VOYAGER SMP: Boot cpu is %d\n", boot_cpu_id);

        /* initialize the CPU structures (moved from smp_boot_cpus) */
        for (i = 0; i < NR_CPUS; i++) {
                cpu_irq_affinity[i] = ~0;
        }
        cpu_online_map = cpumask_of_cpu(boot_cpu_id);

        /* The boot CPU must be extended */
        voyager_extended_vic_processors = 1 << boot_cpu_id;
        /* initially, all of the first 8 CPUs can boot */
        voyager_allowed_boot_processors = 0xff;
        /* set up everything for just this CPU, we can alter
         * this as we start the other CPUs later */
        /* now get the CPU disposition from the extended CMOS */
        cpus_addr(phys_cpu_present_map)[0] =
            voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK);
        cpus_addr(phys_cpu_present_map)[0] |=
            voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8;
        cpus_addr(phys_cpu_present_map)[0] |=
            voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
                                       2) << 16;
        cpus_addr(phys_cpu_present_map)[0] |=
            voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
                                       3) << 24;
        cpu_possible_map = phys_cpu_present_map;
        printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n",
               cpus_addr(phys_cpu_present_map)[0]);
        /* Here we set up the VIC to enable SMP */
        /* enable the CPIs by writing the base vector to their register */
        outb(VIC_DEFAULT_CPI_BASE, VIC_CPI_BASE_REGISTER);
        outb(1, VIC_REDIRECT_REGISTER_1);
        /* set the claim registers for static routing --- the boot CPU
         * gets all interrupts until the other CPUs have started */
        outb(0xff, VIC_CLAIM_REGISTER_0);
        outb(0xff, VIC_CLAIM_REGISTER_1);
        /* Set the Primary and Secondary Microchannel vector
         * bases to be the same as the ordinary interrupts
         *
         * FIXME: This would be more efficient using separate
         * vectors. */
        outb(FIRST_EXTERNAL_VECTOR, VIC_PRIMARY_MC_BASE);
        outb(FIRST_EXTERNAL_VECTOR, VIC_SECONDARY_MC_BASE);

        /* Finally tell the firmware that we're driving */
        outb(inb(VOYAGER_SUS_IN_CONTROL_PORT) | VOYAGER_IN_CONTROL_FLAG,
             VOYAGER_SUS_IN_CONTROL_PORT);

        current_thread_info()->cpu = boot_cpu_id;
        x86_write_percpu(cpu_number, boot_cpu_id);
}

/*
 * The bootstrap kernel entry code has set these up. Save them
 * for a given CPU, id is physical */
void __init smp_store_cpu_info(int id)
{
        struct cpuinfo_x86 *c = &cpu_data(id);

        *c = boot_cpu_data;

        identify_secondary_cpu(c);
}

/* set up the trampoline and return the physical address of the code */
static __u32 __init setup_trampoline(void)
{
        /* these two are global symbols in trampoline.S */
        extern const __u8 trampoline_end[];
        extern const __u8 trampoline_data[];

        memcpy((__u8 *) trampoline_base, trampoline_data,
               trampoline_end - trampoline_data);
        return virt_to_phys((__u8 *) trampoline_base);
}

/* Routine initially called when a non-boot CPU is brought online */
static void __init start_secondary(void *unused)
{
        __u8 cpuid = hard_smp_processor_id();

        cpu_init();

        /* OK, we're in the routine */
        ack_CPI(VIC_CPU_BOOT_CPI);

        /* setup the 8259 master slave pair belonging to this CPU ---
         * we won't actually receive any until the boot CPU
         * relinquishes its static routing mask */
        vic_setup_pic();

        qic_setup();

        if (is_cpu_quad() && !is_cpu_vic_boot()) {
                /* clear the boot CPI */
                __u8 dummy;

                dummy =
                    voyager_quad_cpi_addr[cpuid]->qic_cpi[VIC_CPU_BOOT_CPI].cpi;
                printk("read dummy %d\n", dummy);
        }

        /* lower the mask to receive CPIs */
        vic_enable_cpi();

        VDEBUG(("VOYAGER SMP: CPU%d, stack at about %p\n", cpuid, &cpuid));

        /* enable interrupts */
        local_irq_enable();

        /* get our bogomips */
        calibrate_delay();

        /* save our processor parameters */
        smp_store_cpu_info(cpuid);

        /* if we're a quad, we may need to bootstrap other CPUs */
        do_quad_bootstrap();

        /* FIXME: this is rather a poor hack to prevent the CPU
         * activating softirqs while it's supposed to be waiting for
         * permission to proceed. Without this, the new per CPU stuff
         * in the softirqs will fail */
        local_irq_disable();
        cpu_set(cpuid, cpu_callin_map);

        /* signal that we're done */
        cpu_booted_map = 1;

        while (!cpu_isset(cpuid, smp_commenced_mask))
                rep_nop();
        local_irq_enable();

        local_flush_tlb();

        cpu_set(cpuid, cpu_online_map);
        wmb();
        cpu_idle();
}

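/* A sketch of the boot handshake as it appears from this side: the
 * secondary marks itself in cpu_callin_map and raises cpu_booted_map
 * so that do_boot_cpu() stops polling, then spins on
 * smp_commenced_mask until the boot CPU (later in the boot sequence)
 * sets its bit and releases it into cpu_idle(). */
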
/* Routine to kick start the given CPU and wait for it to report ready
 * (or timeout in startup). When this routine returns, the requested
 * CPU is either fully running and configured or known to be dead.
 *
 * We call this routine sequentially 1 CPU at a time, so no need for
 * locking */
static void __init do_boot_cpu(__u8 cpu)
{
        struct task_struct *idle;
        int timeout;
        unsigned long flags;
        int quad_boot = (1 << cpu) & voyager_quad_processors
            & ~(voyager_extended_vic_processors
                & voyager_allowed_boot_processors);

        /* This is an area in head.S which was used to set up the
         * initial kernel stack. We need to alter this to give the
         * booting CPU a new stack (taken from its idle process) */
        extern struct {
                __u8 *sp;
                unsigned short ss;
        } stack_start;
        /* This is the format of the CPI IDT gate (in real mode) which
         * we're hijacking to boot the CPU */
        union IDTFormat {
                struct seg {
                        __u16 Offset;
                        __u16 Segment;
                } idt;
                __u32 val;
        } hijack_source;

        __u32 *hijack_vector;
        __u32 start_phys_address = setup_trampoline();

        /* There's a clever trick to this: The linux trampoline is
         * compiled to begin at absolute location zero, so make the
         * address zero but have the data segment selector compensate
         * for the actual address */
        hijack_source.idt.Offset = start_phys_address & 0x000F;
        hijack_source.idt.Segment = (start_phys_address >> 4) & 0xFFFF;

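        /* Worked example with a hypothetical address: were the
         * trampoline copied to physical 0x4000, Segment would be
         * 0x0400 and Offset 0x0, so the real-mode vector
         * 0x0400:0x0000 resolves to 0x4000 while the trampoline still
         * executes from offset zero as compiled. */
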
        cpucount++;
        alternatives_smp_switch(1);

        idle = fork_idle(cpu);
        if (IS_ERR(idle))
                panic("failed fork for CPU%d", cpu);
        idle->thread.ip = (unsigned long)start_secondary;
        /* init_tasks (in sched.c) is indexed logically */
        stack_start.sp = (void *)idle->thread.sp;

        init_gdt(cpu);
        per_cpu(current_task, cpu) = idle;
        early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
        irq_ctx_init(cpu);

        /* Note: Don't modify initial ss override */
        VDEBUG(("VOYAGER SMP: Booting CPU%d at 0x%lx[%x:%x], stack %p\n", cpu,
                (unsigned long)hijack_source.val, hijack_source.idt.Segment,
                hijack_source.idt.Offset, stack_start.sp));

        /* init lowmem identity mapping */
        clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
                        min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS));
        flush_tlb_all();

        if (quad_boot) {
                printk("CPU %d: non extended Quad boot\n", cpu);
                hijack_vector =
                    (__u32 *)
                    phys_to_virt((VIC_CPU_BOOT_CPI + QIC_DEFAULT_CPI_BASE) * 4);
                *hijack_vector = hijack_source.val;
        } else {
                printk("CPU%d: extended VIC boot\n", cpu);
                hijack_vector =
                    (__u32 *)
                    phys_to_virt((VIC_CPU_BOOT_CPI + VIC_DEFAULT_CPI_BASE) * 4);
                *hijack_vector = hijack_source.val;
                /* VIC errata, may also receive interrupt at this address */
                hijack_vector =
                    (__u32 *)
                    phys_to_virt((VIC_CPU_BOOT_ERRATA_CPI +
                                  VIC_DEFAULT_CPI_BASE) * 4);
                *hijack_vector = hijack_source.val;
        }
        /* All non-boot CPUs start with interrupts fully masked. Need
         * to lower the mask of the CPI we're about to send. We do
         * this in the VIC by masquerading as the processor we're
         * about to boot and lowering its interrupt mask */
        local_irq_save(flags);
        if (quad_boot) {
                send_one_QIC_CPI(cpu, VIC_CPU_BOOT_CPI);
        } else {
                outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID);
                /* here we're altering registers belonging to `cpu' */

                outb(VIC_BOOT_INTERRUPT_MASK, 0x21);
                /* now go back to our original identity */
                outb(boot_cpu_id, VIC_PROCESSOR_ID);

                /* and boot the CPU */

                send_CPI((1 << cpu), VIC_CPU_BOOT_CPI);
        }
        cpu_booted_map = 0;
        local_irq_restore(flags);

        /* now wait for it to become ready (or timeout) */
        for (timeout = 0; timeout < 50000; timeout++) {
                if (cpu_booted_map)
                        break;
                udelay(100);
        }
        /* reset the page table */
        zap_low_mappings();

        if (cpu_booted_map) {
                VDEBUG(("CPU%d: Booted successfully, back in CPU %d\n",
                        cpu, smp_processor_id()));

                printk("CPU%d: ", cpu);
                print_cpu_info(&cpu_data(cpu));
                wmb();
                cpu_set(cpu, cpu_callout_map);
                cpu_set(cpu, cpu_present_map);
        } else {
                printk("CPU%d FAILED TO BOOT: ", cpu);
                if (*
                    ((volatile unsigned char *)phys_to_virt(start_phys_address))
                    == 0xA5)
                        printk("Stuck.\n");
                else
                        printk("Not responding.\n");

                cpucount--;
        }
}

void __init smp_boot_cpus(void)
{
        int i;

        /* CAT BUS initialisation must be done after the memory */
        /* FIXME: The L4 has a catbus too, it just needs to be
         * accessed in a totally different way */
        if (voyager_level == 5) {
                voyager_cat_init();

                /* now that the cat has probed the Voyager System Bus,
                 * sanity check the cpu map */
                if (((voyager_quad_processors | voyager_extended_vic_processors)
                     & cpus_addr(phys_cpu_present_map)[0]) !=
                    cpus_addr(phys_cpu_present_map)[0]) {
                        /* should panic */
                        printk("\n\n***WARNING*** "
                               "Sanity check of CPU present map FAILED\n");
                }
        } else if (voyager_level == 4)
                voyager_extended_vic_processors =
                    cpus_addr(phys_cpu_present_map)[0];

        /* this sets up the idle task to run on the current cpu */
        voyager_extended_cpus = 1;
        /* Remove the global_irq_holder setting, it triggers a BUG() on
         * schedule at the moment */
        //global_irq_holder = boot_cpu_id;

        /* FIXME: Need to do something about this but currently only works
         * on CPUs with a tsc which none of mine have.
         * smp_tune_scheduling();
         */
        smp_store_cpu_info(boot_cpu_id);
        printk("CPU%d: ", boot_cpu_id);
        print_cpu_info(&cpu_data(boot_cpu_id));

        if (is_cpu_quad()) {
                /* booting on a Quad CPU */
                printk("VOYAGER SMP: Boot CPU is Quad\n");
                qic_setup();
                do_quad_bootstrap();
        }

        /* enable our own CPIs */
        vic_enable_cpi();

        cpu_set(boot_cpu_id, cpu_online_map);
        cpu_set(boot_cpu_id, cpu_callout_map);

        /* loop over all the extended VIC CPUs and boot them. The
         * Quad CPUs must be bootstrapped by their extended VIC cpu */
        for (i = 0; i < NR_CPUS; i++) {
                if (i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map))
                        continue;
                do_boot_cpu(i);
                /* This udelay seems to be needed for the Quad boots --
                 * don't remove it unless you know what you're doing */
                udelay(1000);
        }
        /* we could compute the total bogomips here, but why bother?
         * Code added from smpboot.c */
        {
                unsigned long bogosum = 0;
                for (i = 0; i < NR_CPUS; i++)
                        if (cpu_isset(i, cpu_online_map))
                                bogosum += cpu_data(i).loops_per_jiffy;
                printk(KERN_INFO "Total of %d processors activated "
                       "(%lu.%02lu BogoMIPS).\n",
                       cpucount + 1, bogosum / (500000 / HZ),
                       (bogosum / (5000 / HZ)) % 100);
        }
        voyager_extended_cpus = hweight32(voyager_extended_vic_processors);
        printk("VOYAGER: Extended (interrupt handling CPUs): "
               "%d, non-extended: %d\n", voyager_extended_cpus,
               num_booting_cpus() - voyager_extended_cpus);
        /* that's it, switch to symmetric mode */
        outb(0, VIC_PRIORITY_REGISTER);
        outb(0, VIC_CLAIM_REGISTER_0);
        outb(0, VIC_CLAIM_REGISTER_1);

        VDEBUG(("VOYAGER SMP: Booted with %d CPUs\n", num_booting_cpus()));
}

/* Reload the secondary CPUs task structure (this function does not
 * return) */
void __init initialize_secondary(void)
{
#if 0
        // AC kernels only
        set_current(hard_get_current());
#endif

        /*
         * We don't actually need to load the full TSS,
         * basically just the stack pointer and the eip.
         */

        asm volatile ("movl %0,%%esp\n\t"
                      "jmp *%1"::"r" (current->thread.sp),
                      "r"(current->thread.ip));
}

/* handle a Voyager SYS_INT -- If we don't, the base board will
 * panic the system.
 *
 * System interrupts occur because some problem was detected on the
 * various busses. To find out what you have to probe all the
 * hardware via the CAT bus. FIXME: At the moment we do nothing. */
void smp_vic_sys_interrupt(struct pt_regs *regs)
{
        ack_CPI(VIC_SYS_INT);
        printk("Voyager SYSTEM INTERRUPT\n");
}

/* Handle a voyager CMN_INT; These interrupts occur either because of
 * a system status change or because a single bit memory error
 * occurred. FIXME: At the moment, ignore all this. */
void smp_vic_cmn_interrupt(struct pt_regs *regs)
{
        static __u8 in_cmn_int = 0;
        static DEFINE_SPINLOCK(cmn_int_lock);

        /* common ints are broadcast, so make sure we only do this once */
        _raw_spin_lock(&cmn_int_lock);
        if (in_cmn_int)
                goto unlock_end;

        in_cmn_int++;
        _raw_spin_unlock(&cmn_int_lock);

        VDEBUG(("Voyager COMMON INTERRUPT\n"));

        if (voyager_level == 5)
                voyager_cat_do_common_interrupt();

        _raw_spin_lock(&cmn_int_lock);
        in_cmn_int = 0;
      unlock_end:
        _raw_spin_unlock(&cmn_int_lock);
        ack_CPI(VIC_CMN_INT);
}

/*
 * Reschedule call back. Nothing to do, all the work is done
 * automatically when we return from the interrupt. */
static void smp_reschedule_interrupt(void)
{
        /* do nothing */
}

static struct mm_struct *flush_mm;
static unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);

/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 *
 * We need to reload %cr3 since the page tables may be going
 * away from under us..
 */
static inline void voyager_leave_mm(unsigned long cpu)
{
        if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
                BUG();
        cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
        load_cr3(swapper_pg_dir);
}

/*
 * Invalidate call-back
 */
static void smp_invalidate_interrupt(void)
{
        __u8 cpu = smp_processor_id();

        if (!test_bit(cpu, &smp_invalidate_needed))
                return;
        /* This will flood messages. Don't uncomment unless you see
         * problems with cross cpu invalidation
        VDEBUG(("VOYAGER SMP: CPU%d received INVALIDATE_CPI\n",
                smp_processor_id()));
        */

        if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
                if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
                        if (flush_va == TLB_FLUSH_ALL)
                                local_flush_tlb();
                        else
                                __flush_tlb_one(flush_va);
                } else
                        voyager_leave_mm(cpu);
        }
        smp_mb__before_clear_bit();
        clear_bit(cpu, &smp_invalidate_needed);
        smp_mb__after_clear_bit();
}

/* All the new flush operations for 2.4 */

/* This routine is called with a physical cpu mask */
static void
voyager_flush_tlb_others(unsigned long cpumask, struct mm_struct *mm,
                         unsigned long va)
{
        int stuck = 50000;

        if (!cpumask)
                BUG();
        if ((cpumask & cpus_addr(cpu_online_map)[0]) != cpumask)
                BUG();
        if (cpumask & (1 << smp_processor_id()))
                BUG();
        if (!mm)
                BUG();

        spin_lock(&tlbstate_lock);

        flush_mm = mm;
        flush_va = va;
        atomic_set_mask(cpumask, &smp_invalidate_needed);
        /*
         * We have to send the CPI only to
         * CPUs affected.
         */
        send_CPI(cpumask, VIC_INVALIDATE_CPI);

        while (smp_invalidate_needed) {
                mb();
                if (--stuck == 0) {
                        printk("***WARNING*** Stuck doing invalidate CPI "
                               "(CPU%d)\n", smp_processor_id());
                        break;
                }
        }

        /* Uncomment only to debug invalidation problems
        VDEBUG(("VOYAGER SMP: Completed invalidate CPI (CPU%d)\n", cpu));
        */

        flush_mm = NULL;
        flush_va = 0;
        spin_unlock(&tlbstate_lock);
}

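/* The shootdown protocol above, in short: the initiator holds
 * tlbstate_lock while it publishes flush_mm/flush_va, sets one bit
 * per target in smp_invalidate_needed, sends VIC_INVALIDATE_CPI and
 * spins until every target has cleared its bit in
 * smp_invalidate_interrupt() (or the 50000-iteration guard trips). */
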
void flush_tlb_current_task(void)
{
        struct mm_struct *mm = current->mm;
        unsigned long cpu_mask;

        preempt_disable();

        cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
        local_flush_tlb();
        if (cpu_mask)
                voyager_flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);

        preempt_enable();
}

void flush_tlb_mm(struct mm_struct *mm)
{
        unsigned long cpu_mask;

        preempt_disable();

        cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());

        if (current->active_mm == mm) {
                if (current->mm)
                        local_flush_tlb();
                else
                        voyager_leave_mm(smp_processor_id());
        }
        if (cpu_mask)
                voyager_flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);

        preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long cpu_mask;

        preempt_disable();

        cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
        if (current->active_mm == mm) {
                if (current->mm)
                        __flush_tlb_one(va);
                else
                        voyager_leave_mm(smp_processor_id());
        }

        if (cpu_mask)
                voyager_flush_tlb_others(cpu_mask, mm, va);

        preempt_enable();
}

EXPORT_SYMBOL(flush_tlb_page);

/* enable the requested IRQs */
static void smp_enable_irq_interrupt(void)
{
        __u8 irq;
        __u8 cpu = get_cpu();

        VDEBUG(("VOYAGER SMP: CPU%d enabling irq mask 0x%x\n", cpu,
                vic_irq_enable_mask[cpu]));

        spin_lock(&vic_irq_lock);
        for (irq = 0; irq < 16; irq++) {
                if (vic_irq_enable_mask[cpu] & (1 << irq))
                        enable_local_vic_irq(irq);
        }
        vic_irq_enable_mask[cpu] = 0;
        spin_unlock(&vic_irq_lock);

        put_cpu_no_resched();
}

/*
 * CPU halt call-back
 */
static void smp_stop_cpu_function(void *dummy)
{
        VDEBUG(("VOYAGER SMP: CPU%d is STOPPING\n", smp_processor_id()));
        cpu_clear(smp_processor_id(), cpu_online_map);
        local_irq_disable();
        for (;;)
                halt();
}

static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
        void (*func) (void *info);
        void *info;
        volatile unsigned long started;
        volatile unsigned long finished;
        int wait;
};

static struct call_data_struct *call_data;

/* execute a thread on a new CPU. The function to be called must be
 * previously set up. This is used to schedule a function for
 * execution on all CPUs - set up the function then broadcast a
 * function_interrupt CPI to come here on each CPU */
static void smp_call_function_interrupt(void)
{
        void (*func) (void *info) = call_data->func;
        void *info = call_data->info;
        /* must take copy of wait because call_data may be replaced
         * unless the function is waiting for us to finish */
        int wait = call_data->wait;
        __u8 cpu = smp_processor_id();

        /*
         * Notify initiating CPU that I've grabbed the data and am
         * about to execute the function
         */
        mb();
        if (!test_and_clear_bit(cpu, &call_data->started)) {
                /* If the bit wasn't set, this could be a replay */
                printk(KERN_WARNING "VOYAGER SMP: CPU %d received call "
                       "function with no call pending\n", cpu);
                return;
        }
        /*
         * At this point the info structure may be out of scope unless wait==1
         */
        irq_enter();
        (*func) (info);
        __get_cpu_var(irq_stat).irq_call_count++;
        irq_exit();
        if (wait) {
                mb();
                clear_bit(cpu, &call_data->finished);
        }
}

static int
voyager_smp_call_function_mask(cpumask_t cpumask,
                               void (*func) (void *info), void *info, int wait)
{
        struct call_data_struct data;
        u32 mask = cpus_addr(cpumask)[0];

        mask &= ~(1 << smp_processor_id());

        if (!mask)
                return 0;

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        data.func = func;
        data.info = info;
        data.started = mask;
        data.wait = wait;
        if (wait)
                data.finished = mask;

        spin_lock(&call_lock);
        call_data = &data;
        wmb();
        /* Send a message to all other CPUs and wait for them to respond */
        send_CPI(mask, VIC_CALL_FUNCTION_CPI);

        /* Wait for response */
        while (data.started)
                barrier();

        if (wait)
                while (data.finished)
                        barrier();

        spin_unlock(&call_lock);

        return 0;
}

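/* Handshake sketch: each target clears its bit in data.started as it
 * picks the call up in smp_call_function_interrupt() and, when
 * wait != 0, clears its bit in data.finished once func has returned;
 * the two barrier() loops above poll those masks. Callers arrive via
 * the generic wrappers, e.g. the
 * smp_call_function(smp_stop_cpu_function, NULL, 1, 1) broadcast
 * further down in this file. */
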
/* Sorry about the name. In an APIC based system, the APICs
 * themselves are programmed to send a timer interrupt. This is used
 * by linux to reschedule the processor. Voyager doesn't have this,
 * so we use the system clock to interrupt one processor, which in
 * turn, broadcasts a timer CPI to all the others --- we receive that
 * CPI here. We don't actually use this for counting, so losing
 * ticks doesn't matter.
 *
 * FIXME: For those CPUs which actually have a local APIC, we could
 * try to use it to trigger this interrupt instead of having to
 * broadcast the timer tick. Unfortunately, all my pentium DYADs have
 * no local APIC, so I can't do this.
 *
 * This function is currently a placeholder and is unused in the code */
void smp_apic_timer_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        wrapper_smp_local_timer_interrupt();
        set_irq_regs(old_regs);
}

/* All of the QUAD interrupt GATES */
void smp_qic_timer_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        ack_QIC_CPI(QIC_TIMER_CPI);
        wrapper_smp_local_timer_interrupt();
        set_irq_regs(old_regs);
}

void smp_qic_invalidate_interrupt(struct pt_regs *regs)
{
        ack_QIC_CPI(QIC_INVALIDATE_CPI);
        smp_invalidate_interrupt();
}

void smp_qic_reschedule_interrupt(struct pt_regs *regs)
{
        ack_QIC_CPI(QIC_RESCHEDULE_CPI);
        smp_reschedule_interrupt();
}

void smp_qic_enable_irq_interrupt(struct pt_regs *regs)
{
        ack_QIC_CPI(QIC_ENABLE_IRQ_CPI);
        smp_enable_irq_interrupt();
}

void smp_qic_call_function_interrupt(struct pt_regs *regs)
{
        ack_QIC_CPI(QIC_CALL_FUNCTION_CPI);
        smp_call_function_interrupt();
}

void smp_vic_cpi_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        __u8 cpu = smp_processor_id();

        if (is_cpu_quad())
                ack_QIC_CPI(VIC_CPI_LEVEL0);
        else
                ack_VIC_CPI(VIC_CPI_LEVEL0);

        if (test_and_clear_bit(VIC_TIMER_CPI, &vic_cpi_mailbox[cpu]))
                wrapper_smp_local_timer_interrupt();
        if (test_and_clear_bit(VIC_INVALIDATE_CPI, &vic_cpi_mailbox[cpu]))
                smp_invalidate_interrupt();
        if (test_and_clear_bit(VIC_RESCHEDULE_CPI, &vic_cpi_mailbox[cpu]))
                smp_reschedule_interrupt();
        if (test_and_clear_bit(VIC_ENABLE_IRQ_CPI, &vic_cpi_mailbox[cpu]))
                smp_enable_irq_interrupt();
        if (test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu]))
                smp_call_function_interrupt();
        set_irq_regs(old_regs);
}

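/* This is the demultiplexer for the faked CPIs: the VIC only gives us
 * CPI level 0 (see send_CPI(), which sets a bit in vic_cpi_mailbox[]
 * before raising VIC_CPI_LEVEL0), so each flavour is recovered here
 * with a test_and_clear_bit() on the per-cpu mailbox. */
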
static void do_flush_tlb_all(void *info)
{
        unsigned long cpu = smp_processor_id();

        __flush_tlb_all();
        if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
                voyager_leave_mm(cpu);
}

/* flush the TLB of every active CPU in the system */
void flush_tlb_all(void)
{
        on_each_cpu(do_flush_tlb_all, 0, 1, 1);
}

/* used to set up the trampoline for other CPUs when the memory manager
 * is sorted out */
void __init smp_alloc_memory(void)
{
        trampoline_base = (__u32) alloc_bootmem_low_pages(PAGE_SIZE);
        if (__pa(trampoline_base) >= 0x93000)
                BUG();
}

/* send a reschedule CPI to one CPU by physical CPU number */
static void voyager_smp_send_reschedule(int cpu)
{
        send_one_CPI(cpu, VIC_RESCHEDULE_CPI);
}

int hard_smp_processor_id(void)
{
        __u8 i;
        __u8 cpumask = inb(VIC_PROC_WHO_AM_I);
        if ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER)
                return cpumask & 0x1F;

        for (i = 0; i < 8; i++) {
                if (cpumask & (1 << i))
                        return i;
        }
        printk("** WARNING ** Illegal cpuid returned by VIC: %d", cpumask);
        return 0;
}

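/* For reference, the VIC_PROC_WHO_AM_I encoding assumed above: on a
 * quad card the register carries QUAD_IDENTIFIER plus the cpu id in
 * the low five bits; on the ordinary VIC boards it is a one-hot mask
 * with one bit per processor slot, hence the two decoding paths. */
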
int safe_smp_processor_id(void)
{
        return hard_smp_processor_id();
}

/* broadcast a halt to all other CPUs */
static void voyager_smp_send_stop(void)
{
        smp_call_function(smp_stop_cpu_function, NULL, 1, 1);
}

/* this function is triggered in time.c when a clock tick fires
 * we need to re-broadcast the tick to all CPUs */
void smp_vic_timer_interrupt(void)
{
        send_CPI_allbutself(VIC_TIMER_CPI);
        smp_local_timer_interrupt();
}

/* local (per CPU) timer interrupt. It does both profiling and
 * process statistics/rescheduling.
 *
 * We do profiling in every local tick, statistics/rescheduling
 * happen only every 'profiling multiplier' ticks. The default
 * multiplier is 1 and it can be changed by writing the new multiplier
 * value into /proc/profile.
 */
void smp_local_timer_interrupt(void)
{
        int cpu = smp_processor_id();
        long weight;

        profile_tick(CPU_PROFILING);
        if (--per_cpu(prof_counter, cpu) <= 0) {
                /*
                 * The multiplier may have changed since the last time we got
                 * to this point as a result of the user writing to
                 * /proc/profile. In this case we need to adjust the APIC
                 * timer accordingly.
                 *
                 * Interrupts are already masked off at this point.
                 */
                per_cpu(prof_counter, cpu) = per_cpu(prof_multiplier, cpu);
                if (per_cpu(prof_counter, cpu) !=
                    per_cpu(prof_old_multiplier, cpu)) {
                        /* FIXME: need to update the vic timer tick here */
                        per_cpu(prof_old_multiplier, cpu) =
                            per_cpu(prof_counter, cpu);
                }

                update_process_times(user_mode_vm(get_irq_regs()));
        }

        if (((1 << cpu) & voyager_extended_vic_processors) == 0)
                /* only extended VIC processors participate in
                 * interrupt distribution */
                return;

        /*
         * We take the 'long' return path, and there every subsystem
         * grabs the appropriate locks (kernel lock/ irq lock).
         *
         * we might want to decouple profiling from the 'long path',
         * and do the profiling totally in assembly.
         *
         * Currently this isn't too much of an issue (performance wise),
         * we can take more than 100K local irqs per second on a 100 MHz P5.
         */

        if ((++vic_tick[cpu] & 0x7) != 0)
                return;
        /* get here every 16 ticks (about every 1/6 of a second) */

        /* Change our priority to give someone else a chance at getting
         * the IRQ. The algorithm goes like this:
         *
         * In the VIC, the dynamically routed interrupt is always
         * handled by the lowest priority eligible (i.e. receiving
         * interrupts) CPU. If >1 eligible CPUs are equal lowest, the
         * lowest processor number gets it.
         *
         * The priority of a CPU is controlled by a special per-CPU
         * VIC priority register which is 3 bits wide, 0 being lowest
         * and 7 highest priority.
         *
         * Therefore we subtract the average number of interrupts from
         * the number we've fielded. If this number is negative, we
         * lower the activity count and if it is positive, we raise
         * it.
         *
         * I'm afraid this still leads to odd looking interrupt counts:
         * the totals are all roughly equal, but the individual ones
         * look rather skewed.
         *
         * FIXME: This algorithm is total crap when mixed with SMP
         * affinity code since we now try to even up the interrupt
         * counts when an affinity binding is keeping them on a
         * particular CPU */
        weight = (vic_intr_count[cpu] * voyager_extended_cpus
                  - vic_intr_total) >> 4;
        weight += 4;
        if (weight > 7)
                weight = 7;
        if (weight < 0)
                weight = 0;

        outb((__u8) weight, VIC_PRIORITY_REGISTER);

#ifdef VOYAGER_DEBUG
        if ((vic_tick[cpu] & 0xFFF) == 0) {
                /* print this message roughly every 25 secs */
                printk("VOYAGER SMP: vic_tick[%d] = %lu, weight = %ld\n",
                       cpu, vic_tick[cpu], weight);
        }
#endif
}

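/* Worked example with made-up numbers: with 4 extended CPUs, a CPU
 * that has fielded 1000 interrupts against a system total of 3600
 * gets weight = ((1000 * 4 - 3600) >> 4) + 4 = 29, clamped to 7 --
 * it has been busier than average, so it raises its priority and the
 * VIC steers new interrupts at the quieter (lower priority) CPUs. */
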
/* setup the profiling timer */
int setup_profiling_timer(unsigned int multiplier)
{
        int i;

        if (!multiplier)
                return -EINVAL;

        /*
         * Set the new multiplier for each CPU. CPUs don't start using the
         * new values until the next timer interrupt in which they do process
         * accounting.
         */
        for (i = 0; i < NR_CPUS; ++i)
                per_cpu(prof_multiplier, i) = multiplier;

        return 0;
}

/* This is a bit of a mess, but forced on us by the genirq changes.
 * There's no genirq handler that really does what voyager wants,
 * so hack it up with the simple IRQ handler */
static void handle_vic_irq(unsigned int irq, struct irq_desc *desc)
{
        before_handle_vic_irq(irq);
        handle_simple_irq(irq, desc);
        after_handle_vic_irq(irq);
}

/* The CPIs are handled in the per cpu 8259s, so they must be
 * enabled to be received: FIX: enabling the CPIs in the early
 * boot sequence interferes with bug checking; enable them later
 * on in smp_init */
#define VIC_SET_GATE(cpi, vector) \
        set_intr_gate((cpi) + VIC_DEFAULT_CPI_BASE, (vector))
#define QIC_SET_GATE(cpi, vector) \
        set_intr_gate((cpi) + QIC_DEFAULT_CPI_BASE, (vector))

void __init smp_intr_init(void)
{
        int i;

        /* initialize the per cpu irq mask to all disabled */
        for (i = 0; i < NR_CPUS; i++)
                vic_irq_mask[i] = 0xFFFF;

        VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt);

        VIC_SET_GATE(VIC_SYS_INT, vic_sys_interrupt);
        VIC_SET_GATE(VIC_CMN_INT, vic_cmn_interrupt);

        QIC_SET_GATE(QIC_TIMER_CPI, qic_timer_interrupt);
        QIC_SET_GATE(QIC_INVALIDATE_CPI, qic_invalidate_interrupt);
        QIC_SET_GATE(QIC_RESCHEDULE_CPI, qic_reschedule_interrupt);
        QIC_SET_GATE(QIC_ENABLE_IRQ_CPI, qic_enable_irq_interrupt);
        QIC_SET_GATE(QIC_CALL_FUNCTION_CPI, qic_call_function_interrupt);

        /* now put the VIC descriptor into the first 48 IRQs
         *
         * This is for later: first 16 correspond to PC IRQs; next 16
         * are Primary MC IRQs and final 16 are Secondary MC IRQs */
        for (i = 0; i < 48; i++)
                set_irq_chip_and_handler(i, &vic_chip, handle_vic_irq);
}

/* send a CPI at level cpi to a set of cpus in cpuset (set 1 bit per
 * processor to receive CPI) */
static void send_CPI(__u32 cpuset, __u8 cpi)
{
        int cpu;
        __u32 quad_cpuset = (cpuset & voyager_quad_processors);

        if (cpi < VIC_START_FAKE_CPI) {
                /* fake CPI are only used for booting, so send to the
                 * extended quads as well---Quads must be VIC booted */
                outb((__u8) (cpuset), VIC_CPI_Registers[cpi]);
                return;
        }
        if (quad_cpuset)
                send_QIC_CPI(quad_cpuset, cpi);
        cpuset &= ~quad_cpuset;
        cpuset &= 0xff;         /* only first 8 CPUs valid for VIC CPI */
        if (cpuset == 0)
                return;
        for_each_online_cpu(cpu) {
                if (cpuset & (1 << cpu))
                        set_bit(cpi, &vic_cpi_mailbox[cpu]);
        }
        if (cpuset)
                outb((__u8) cpuset, VIC_CPI_Registers[VIC_CPI_LEVEL0]);
}

/* Acknowledge receipt of CPI in the QIC, clear in QIC hardware and
 * set the cache line to shared by reading it.
 *
 * DON'T make this inline otherwise the cache line read will be
 * optimised away
 */
static int ack_QIC_CPI(__u8 cpi)
{
        __u8 cpu = hard_smp_processor_id();

        cpi &= 7;

        outb(1 << cpi, QIC_INTERRUPT_CLEAR1);
        return voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi;
}

static void ack_special_QIC_CPI(__u8 cpi)
{
        switch (cpi) {
        case VIC_CMN_INT:
                outb(QIC_CMN_INT, QIC_INTERRUPT_CLEAR0);
                break;
        case VIC_SYS_INT:
                outb(QIC_SYS_INT, QIC_INTERRUPT_CLEAR0);
                break;
        }
        /* also clear at the VIC, just in case (nop for non-extended proc) */
        ack_VIC_CPI(cpi);
}

1434 | /* Acknowledge receipt of CPI in the VIC (essentially an EOI) */ | |
a4ec1eff | 1435 | static void ack_VIC_CPI(__u8 cpi) |
1da177e4 LT |
1436 | { |
1437 | #ifdef VOYAGER_DEBUG | |
1438 | unsigned long flags; | |
1439 | __u16 isr; | |
1440 | __u8 cpu = smp_processor_id(); | |
1441 | ||
1442 | local_irq_save(flags); | |
1443 | isr = vic_read_isr(); | |
a4ec1eff | 1444 | if ((isr & (1 << (cpi & 7))) == 0) { |
1da177e4 LT |
1445 | printk("VOYAGER SMP: CPU%d lost CPI%d\n", cpu, cpi); |
1446 | } | |
1447 | #endif | |
1448 | /* send specific EOI; the two system interrupts have | |
1449 | * bit 4 set for a separate vector but behave as the | |
1450 | * corresponding 3 bit intr */ | |
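	/* editor's note: 0x60 is the 8259 specific-EOI OCW2 command,
	 * written to the command port (0x20); the low three bits pick
	 * the in-service level, so a bit-4 CPI EOIs as its 3-bit twin */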
a4ec1eff | 1451 | outb_p(0x60 | (cpi & 7), 0x20); |
1da177e4 LT |
1452 | |
1453 | #ifdef VOYAGER_DEBUG | |
a4ec1eff | 1454 | if ((vic_read_isr() & (1 << (cpi & 7))) != 0) { |
1da177e4 LT |
1455 | printk("VOYAGER SMP: CPU%d still asserting CPI%d\n", cpu, cpi); |
1456 | } | |
1457 | local_irq_restore(flags); | |
1458 | #endif | |
1459 | } | |
1460 | ||
1461 | /* cribbed with thanks from irq.c */ | |
a4ec1eff | 1462 | #define __byte(x,y) (((unsigned char *)&(y))[x]) |
1da177e4 LT |
1463 | #define cached_21(cpu) (__byte(0,vic_irq_mask[cpu])) |
1464 | #define cached_A1(cpu) (__byte(1,vic_irq_mask[cpu])) | |
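/* Worked example (editor's note, hypothetical value): on little-endian
 * x86, if vic_irq_mask[cpu] == 0xABCD then cached_21(cpu) is the low
 * byte 0xCD (master 8259, port 0x21) and cached_A1(cpu) is the high
 * byte 0xAB (slave 8259, port 0xA1) */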
1465 | ||
a4ec1eff | 1466 | static unsigned int startup_vic_irq(unsigned int irq) |
1da177e4 | 1467 | { |
c771746e | 1468 | unmask_vic_irq(irq); |
1da177e4 LT |
1469 | |
1470 | return 0; | |
1471 | } | |
1472 | ||
1473 | /* The enable and disable routines. This is where we run into | |
1474 | * conflicting architectural philosophy. Fundamentally, the voyager | |
1475 | * architecture does not expect to have to disable interrupts globally | |
1476 | * (the IRQ controllers belong to each CPU). The processor masquerade | |
1477 | * which is used to start the system shouldn't be used in a running OS | |
1478 | * since it will cause great confusion if two separate CPUs drive to | |
1479 | * the same IRQ controller (I know, I've tried it). | |
1480 | * | |
1481 | * The solution is a variant on the NCR lazy SPL design: | |
1482 | * | |
1483 | * 1) To disable an interrupt, do nothing (other than set the | |
1484 | * IRQ_DISABLED flag). This dares the interrupt actually to arrive. | |
1485 | * | |
1486 | * 2) If the interrupt dares to come in, raise the local mask against | |
1487 | * it (this will result in all the CPU masks being raised | |
1488 | * eventually). | |
1489 | * | |
1490 | * 3) To enable the interrupt, lower the mask on the local CPU and | |
1491 | * broadcast an Interrupt enable CPI which causes all other CPUs to | |
1492 | * adjust their masks accordingly (see the sketch below). */ | |
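/* Editor's sketch of the three steps above (hypothetical helper names,
 * compiled out; the real work is done by the routines further down): */
#if 0
static void lazy_mask_sketch(unsigned int irq)
{
	/* step 1: "disabling" costs nothing; the flag alone dares the
	 * interrupt to arrive (mask_vic_irq() below is an empty stub) */
	irq_desc[irq].status |= IRQ_DISABLED;
}

static void lazy_arrival_sketch(unsigned int irq)
{
	/* step 2: it dared; raise the mask on this CPU only */
	disable_local_vic_irq(irq);
}

static void lazy_unmask_sketch(unsigned int irq)
{
	/* step 3: lower the local mask, then CPI the other CPUs so
	 * they lower theirs (roughly what unmask_vic_irq() does) */
	enable_local_vic_irq(irq);
	send_CPI(~0, VIC_ENABLE_IRQ_CPI);
}
#endif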
1493 | ||
a4ec1eff | 1494 | static void unmask_vic_irq(unsigned int irq) |
1da177e4 LT |
1495 | { |
1496 | /* linux doesn't do processor-irq affinity, so enable on | |
1497 | * all CPUs we know about */ | |
1498 | int cpu = smp_processor_id(), real_cpu; | |
a4ec1eff | 1499 | __u16 mask = (1 << irq); |
1da177e4 LT |
1500 | __u32 processorList = 0; |
1501 | unsigned long flags; | |
1502 | ||
c771746e | 1503 | VDEBUG(("VOYAGER: unmask_vic_irq(%d) CPU%d affinity 0x%lx\n", |
1da177e4 LT |
1504 | irq, cpu, cpu_irq_affinity[cpu])); |
1505 | spin_lock_irqsave(&vic_irq_lock, flags); | |
1506 | for_each_online_cpu(real_cpu) { | |
a4ec1eff | 1507 | if (!(voyager_extended_vic_processors & (1 << real_cpu))) |
1da177e4 | 1508 | continue; |
a4ec1eff | 1509 | if (!(cpu_irq_affinity[real_cpu] & mask)) { |
1da177e4 LT |
1510 | /* irq has no affinity for this CPU, ignore */ |
1511 | continue; | |
1512 | } | |
a4ec1eff | 1513 | if (real_cpu == cpu) { |
1da177e4 | 1514 | enable_local_vic_irq(irq); |
a4ec1eff | 1515 | } else if (vic_irq_mask[real_cpu] & mask) { |
1da177e4 | 1516 | vic_irq_enable_mask[real_cpu] |= mask; |
a4ec1eff | 1517 | processorList |= (1 << real_cpu); |
1da177e4 LT |
1518 | } |
1519 | } | |
1520 | spin_unlock_irqrestore(&vic_irq_lock, flags); | |
a4ec1eff | 1521 | if (processorList) |
1da177e4 LT |
1522 | send_CPI(processorList, VIC_ENABLE_IRQ_CPI); |
1523 | } | |
1524 | ||
a4ec1eff | 1525 | static void mask_vic_irq(unsigned int irq) |
1da177e4 LT |
1526 | { |
1527 | /* lazy disable, do nothing */ | |
1528 | } | |
1529 | ||
a4ec1eff | 1530 | static void enable_local_vic_irq(unsigned int irq) |
1da177e4 LT |
1531 | { |
1532 | __u8 cpu = smp_processor_id(); | |
1533 | __u16 mask = ~(1 << irq); | |
1534 | __u16 old_mask = vic_irq_mask[cpu]; | |
1535 | ||
1536 | vic_irq_mask[cpu] &= mask; | |
a4ec1eff | 1537 | if (vic_irq_mask[cpu] == old_mask) |
1da177e4 LT |
1538 | return; |
1539 | ||
1540 | VDEBUG(("VOYAGER DEBUG: Enabling irq %d in hardware on CPU %d\n", | |
1541 | irq, cpu)); | |
1542 | ||
1543 | if (irq & 8) { | |
a4ec1eff | 1544 | outb_p(cached_A1(cpu), 0xA1); |
1da177e4 | 1545 | (void)inb_p(0xA1); |
a4ec1eff IM |
1546 | } else { |
1547 | outb_p(cached_21(cpu), 0x21); | |
1da177e4 LT |
1548 | (void)inb_p(0x21); |
1549 | } | |
1550 | } | |
1551 | ||
a4ec1eff | 1552 | static void disable_local_vic_irq(unsigned int irq) |
1da177e4 LT |
1553 | { |
1554 | __u8 cpu = smp_processor_id(); | |
1555 | __u16 mask = (1 << irq); | |
1556 | __u16 old_mask = vic_irq_mask[cpu]; | |
1557 | ||
a4ec1eff | 1558 | if (irq == 7) |
1da177e4 LT |
1559 | return; |
1560 | ||
1561 | vic_irq_mask[cpu] |= mask; | |
a4ec1eff | 1562 | if (old_mask == vic_irq_mask[cpu]) |
1da177e4 LT |
1563 | return; |
1564 | ||
1565 | VDEBUG(("VOYAGER DEBUG: Disabling irq %d in hardware on CPU %d\n", | |
1566 | irq, cpu)); | |
1567 | ||
1568 | if (irq & 8) { | |
a4ec1eff | 1569 | outb_p(cached_A1(cpu), 0xA1); |
1da177e4 | 1570 | (void)inb_p(0xA1); |
a4ec1eff IM |
1571 | } else { |
1572 | outb_p(cached_21(cpu), 0x21); | |
1da177e4 LT |
1573 | (void)inb_p(0x21); |
1574 | } | |
1575 | } | |
1576 | ||
1577 | /* The VIC is level triggered, so the ack can only be issued after the | |
1578 | * interrupt completes. However, we do Voyager lazy interrupt | |
1579 | * handling here: It is an extremely expensive operation to mask an | |
1580 | * interrupt in the vic, so we merely set a flag (IRQ_DISABLED). If | |
1581 | * this interrupt actually comes in, then we mask and ack here to push | |
1582 | * the interrupt off to another CPU */ | |
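/* Editor's summary (inferred from the code below, not original text):
 * IRQ_REPLAY means "masked and acked locally, another CPU may take the
 * next assertion"; IRQ_INPROGRESS is set as well only when the irq was
 * outside our affinity mask, so the generic code skips the handler. */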
a4ec1eff | 1583 | static void before_handle_vic_irq(unsigned int irq) |
1da177e4 LT |
1584 | { |
1585 | irq_desc_t *desc = irq_desc + irq; | |
1586 | __u8 cpu = smp_processor_id(); | |
1587 | ||
1588 | _raw_spin_lock(&vic_irq_lock); | |
1589 | vic_intr_total++; | |
1590 | vic_intr_count[cpu]++; | |
1591 | ||
a4ec1eff | 1592 | if (!(cpu_irq_affinity[cpu] & (1 << irq))) { |
1da177e4 LT |
1593 | /* The irq is not in our affinity mask, push it off |
1594 | * onto another CPU */ | |
a4ec1eff IM |
1595 | VDEBUG(("VOYAGER DEBUG: affinity triggered disable of irq %d " |
1596 | "on cpu %d\n", irq, cpu)); | |
1da177e4 LT |
1597 | disable_local_vic_irq(irq); |
1598 | /* set IRQ_INPROGRESS to prevent the handler in irq.c from | |
1599 | * actually calling the interrupt routine */ | |
1600 | desc->status |= IRQ_REPLAY | IRQ_INPROGRESS; | |
a4ec1eff | 1601 | } else if (desc->status & IRQ_DISABLED) { |
1da177e4 LT |
1602 | /* Damn, the interrupt actually arrived, do the lazy |
1603 | * disable thing. The interrupt routine in irq.c will | |
1604 | * not handle an IRQ_DISABLED interrupt, so nothing more | |
1605 | * need be done here */ | |
1606 | VDEBUG(("VOYAGER DEBUG: lazy disable of irq %d on CPU %d\n", | |
1607 | irq, cpu)); | |
1608 | disable_local_vic_irq(irq); | |
1609 | desc->status |= IRQ_REPLAY; | |
1610 | } else { | |
1611 | desc->status &= ~IRQ_REPLAY; | |
1612 | } | |
1613 | ||
1614 | _raw_spin_unlock(&vic_irq_lock); | |
1615 | } | |
1616 | ||
1617 | /* Finish the VIC interrupt: basically mask */ | |
a4ec1eff | 1618 | static void after_handle_vic_irq(unsigned int irq) |
1da177e4 LT |
1619 | { |
1620 | irq_desc_t *desc = irq_desc + irq; | |
1621 | ||
1622 | _raw_spin_lock(&vic_irq_lock); | |
1623 | { | |
1624 | unsigned int status = desc->status & ~IRQ_INPROGRESS; | |
1625 | #ifdef VOYAGER_DEBUG | |
1626 | __u16 isr; | |
1627 | #endif | |
1628 | ||
1629 | desc->status = status; | |
1630 | if (status & IRQ_DISABLED) | |
1631 | disable_local_vic_irq(irq); | |
1632 | #ifdef VOYAGER_DEBUG | |
1633 | /* DEBUG: before we ack, check what's in progress */ | |
1634 | isr = vic_read_isr(); | |
a4ec1eff | 1635 | if (!(isr & (1 << irq)) && !(status & IRQ_REPLAY)) { | |
1da177e4 LT |
1636 | int i; |
1637 | __u8 cpu = smp_processor_id(); | |
1638 | __u8 real_cpu; | |
1da177e4 LT |
1640 | |
1641 | printk("VOYAGER SMP: CPU%d lost interrupt %d\n", | |
1642 | cpu, irq); | |
c8912599 | 1643 | for_each_possible_cpu(real_cpu) { | |
1da177e4 LT |
1644 | |
1645 | outb(VIC_CPU_MASQUERADE_ENABLE | real_cpu, | |
1646 | VIC_PROCESSOR_ID); | |
1647 | isr = vic_read_isr(); | |
a4ec1eff IM |
1648 | if (isr & (1 << irq)) { |
1649 | printk | |
1650 | ("VOYAGER SMP: CPU%d ack irq %d\n", | |
1651 | real_cpu, irq); | |
1da177e4 LT |
1652 | ack_vic_irq(irq); |
1653 | } | |
1654 | outb(cpu, VIC_PROCESSOR_ID); | |
1655 | } | |
1656 | } | |
1657 | #endif /* VOYAGER_DEBUG */ | |
1658 | /* as soon as we ack, the interrupt is eligible for | |
1659 | * receipt by another CPU so everything must be in | |
1660 | * order here */ | |
1661 | ack_vic_irq(irq); | |
a4ec1eff | 1662 | if (status & IRQ_REPLAY) { |
1da177e4 LT |
1663 | /* replay is set if we disable the interrupt |
1664 | * in the before_handle_vic_irq() routine, so | |
1665 | * clear the in progress bit here to allow the | |
1666 | * next CPU to handle this correctly */ | |
1667 | desc->status &= ~(IRQ_REPLAY | IRQ_INPROGRESS); | |
1668 | } | |
1669 | #ifdef VOYAGER_DEBUG | |
1670 | isr = vic_read_isr(); | |
a4ec1eff IM |
1671 | if ((isr & (1 << irq)) != 0) |
1672 | printk("VOYAGER SMP: after_handle_vic_irq() after " | |
1673 | "ack irq=%d, isr=0x%x\n", irq, isr); | |
1da177e4 LT |
1674 | #endif /* VOYAGER_DEBUG */ |
1675 | } | |
1676 | _raw_spin_unlock(&vic_irq_lock); | |
1677 | ||
1678 | /* All code after this point is out of the main path - the IRQ | |
1679 | * may be intercepted by another CPU if reasserted */ | |
1680 | } | |
1681 | ||
1da177e4 LT |
1682 | /* Linux processor - interrupt affinity manipulations. |
1683 | * | |
1684 | * For each processor, we maintain a 32 bit irq affinity mask. | |
1685 | * Initially it is set to all 1's so every processor accepts every | |
1686 | * interrupt. In this call, we change the processor's affinity mask: | |
1687 | * | |
1688 | * Change from enable to disable: | |
1689 | * | |
1690 | * If the interrupt ever comes in to the processor, we will disable it | |
1691 | * and ack it to push it off to another CPU, so just accept the mask here. | |
1692 | * | |
1693 | * Change from disable to enable: | |
1694 | * | |
1695 | * change the mask and then do an interrupt enable CPI to re-enable on | |
1696 | * the selected processors */ | |
1697 | ||
a4ec1eff | 1698 | void set_vic_irq_affinity(unsigned int irq, cpumask_t mask) |
1da177e4 LT |
1699 | { |
1700 | /* Only extended processors handle interrupts */ | |
1701 | unsigned long real_mask; | |
1702 | unsigned long irq_mask = 1 << irq; | |
1703 | int cpu; | |
1704 | ||
1705 | real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors; | |
a4ec1eff IM |
1706 | |
1707 | if (cpus_addr(mask)[0] == 0) | |
27b46d76 | 1708 | /* can't have no CPUs to accept the interrupt -- extremely |
1da177e4 LT |
1709 | * bad things will happen */ |
1710 | return; | |
1711 | ||
a4ec1eff | 1712 | if (irq == 0) |
1da177e4 LT |
1713 | /* can't change the affinity of the timer IRQ. This |
1714 | * is due to the constraint in the voyager | |
1715 | * architecture that the CPI also comes in on an IRQ | |
1716 | * line and we have chosen IRQ0 for this. If you | |
1717 | * raise the mask on this interrupt, the processor | |
1718 | * will no longer be able to accept VIC CPIs */ | |
1719 | return; | |
1720 | ||
a4ec1eff | 1721 | if (irq >= 32) |
1da177e4 LT |
1722 | /* You can only have 32 interrupts in a voyager system |
1723 | * (and 32 only if you have a secondary microchannel | |
1724 | * bus) */ | |
1725 | return; | |
1726 | ||
1727 | for_each_online_cpu(cpu) { | |
1728 | unsigned long cpu_mask = 1 << cpu; | |
a4ec1eff IM |
1729 | |
1730 | if (cpu_mask & real_mask) { | |
1da177e4 LT |
1731 | /* enable the interrupt for this cpu */ |
1732 | cpu_irq_affinity[cpu] |= irq_mask; | |
1733 | } else { | |
1734 | /* disable the interrupt for this cpu */ | |
1735 | cpu_irq_affinity[cpu] &= ~irq_mask; | |
1736 | } | |
1737 | } | |
1738 | /* this is magic, we now have the correct affinity maps, so | |
1739 | * enable the interrupt. This will send an enable CPI to | |
27b46d76 | 1740 | * those CPUs that need to enable it in their local masks, | |
1da177e4 LT |
1741 | * causing them to correct for the new affinity. If the | |
1742 | * interrupt is currently globally disabled, it will simply be | |
1743 | * disabled again as it comes in (voyager lazy disable). If | |
1744 | * the affinity map is tightened to disable the interrupt on a | |
1745 | * cpu, it will be pushed off when it comes in */ | |
c771746e | 1746 | unmask_vic_irq(irq); |
1da177e4 LT |
1747 | } |
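/* Usage sketch (editor's illustration, hypothetical values): restrict
 * IRQ 9 to CPUs 0 and 1.  Note the guards above: an empty mask, IRQ 0
 * and IRQs >= 32 are all rejected.
 *
 *	cpumask_t m = CPU_MASK_NONE;
 *	cpu_set(0, m);
 *	cpu_set(1, m);
 *	set_vic_irq_affinity(9, m);
 */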
1748 | ||
a4ec1eff | 1749 | static void ack_vic_irq(unsigned int irq) |
1da177e4 LT |
1750 | { |
1751 | if (irq & 8) { | |
a4ec1eff IM |
1752 | outb(0x62, 0x20); /* Specific EOI to cascade */ |
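		/* editor's note: 0x62 == 0x60 | 2, a specific EOI for
		 * IRQ2, the line the slave 8259 cascades through */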
1753 | outb(0x60 | (irq & 7), 0xA0); | |
1da177e4 | 1754 | } else { |
a4ec1eff | 1755 | outb(0x60 | (irq & 7), 0x20); |
1da177e4 LT |
1756 | } |
1757 | } | |
1758 | ||
1759 | /* enable the CPIs. In the VIC, the CPIs are delivered by the 8259 | |
1760 | * but are not vectored by it. This means that the 8259 mask must be | |
1761 | * lowered to receive them */ | |
a4ec1eff | 1762 | static __init void vic_enable_cpi(void) |
1da177e4 LT |
1763 | { |
1764 | __u8 cpu = smp_processor_id(); | |
a4ec1eff | 1765 | |
1da177e4 LT |
1766 | /* just take a copy of the current mask (nop for boot cpu) */ |
1767 | vic_irq_mask[cpu] = vic_irq_mask[boot_cpu_id]; | |
1768 | ||
1769 | enable_local_vic_irq(VIC_CPI_LEVEL0); | |
1770 | enable_local_vic_irq(VIC_CPI_LEVEL1); | |
1771 | /* for sys int and cmn int */ | |
1772 | enable_local_vic_irq(7); | |
1773 | ||
a4ec1eff | 1774 | if (is_cpu_quad()) { |
1da177e4 LT |
1775 | outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0); |
1776 | outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1); | |
1777 | VDEBUG(("VOYAGER SMP: QIC ENABLE CPI: CPU%d: MASK 0x%x\n", | |
1778 | cpu, QIC_CPI_ENABLE)); | |
1779 | } | |
1780 | ||
1781 | VDEBUG(("VOYAGER SMP: ENABLE CPI: CPU%d: MASK 0x%x\n", | |
1782 | cpu, vic_irq_mask[cpu])); | |
1783 | } | |
1784 | ||
a4ec1eff | 1785 | void voyager_smp_dump(void) | |
1da177e4 LT |
1786 | { |
1787 | int old_cpu = smp_processor_id(), cpu; | |
1788 | ||
1789 | /* dump the interrupt masks of each processor */ | |
1790 | for_each_online_cpu(cpu) { | |
1791 | __u16 imr, isr, irr; | |
1792 | unsigned long flags; | |
1793 | ||
1794 | local_irq_save(flags); | |
1795 | outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID); | |
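		/* editor's note: while masquerading as 'cpu' we read its
		 * 8259 pair directly: the mask ports give the IMR, OCW3
		 * 0x0a selects the IRR and OCW3 0x0b the ISR for reading */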
1796 | imr = (inb(0xa1) << 8) | inb(0x21); | |
1797 | outb(0x0a, 0xa0); | |
1798 | irr = inb(0xa0) << 8; | |
1799 | outb(0x0a, 0x20); | |
1800 | irr |= inb(0x20); | |
1801 | outb(0x0b, 0xa0); | |
1802 | isr = inb(0xa0) << 8; | |
1803 | outb(0x0b, 0x20); | |
1804 | isr |= inb(0x20); | |
1805 | outb(old_cpu, VIC_PROCESSOR_ID); | |
1806 | local_irq_restore(flags); | |
1807 | printk("\tCPU%d: mask=0x%x, IMR=0x%x, IRR=0x%x, ISR=0x%x\n", | |
1808 | cpu, vic_irq_mask[cpu], imr, irr, isr); | |
1809 | #if 0 | |
1810 | /* These lines are put in to try to unstick an un-ack'd irq */ | |
a4ec1eff | 1811 | if (isr != 0) { |
1da177e4 | 1812 | int irq; |
a4ec1eff IM |
1813 | for (irq = 0; irq < 16; irq++) { |
1814 | if (isr & (1 << irq)) { | |
1da177e4 LT |
1815 | printk("\tCPU%d: ack irq %d\n", |
1816 | cpu, irq); | |
1817 | local_irq_save(flags); | |
1818 | outb(VIC_CPU_MASQUERADE_ENABLE | cpu, | |
1819 | VIC_PROCESSOR_ID); | |
1820 | ack_vic_irq(irq); | |
1821 | outb(old_cpu, VIC_PROCESSOR_ID); | |
1822 | local_irq_restore(flags); | |
1823 | } | |
1824 | } | |
1825 | } | |
1826 | #endif | |
1827 | } | |
1828 | } | |
1829 | ||
a4ec1eff | 1830 | void smp_voyager_power_off(void *dummy) |
1da177e4 | 1831 | { |
a4ec1eff | 1832 | if (smp_processor_id() == boot_cpu_id) |
1da177e4 LT |
1833 | voyager_power_off(); |
1834 | else | |
1835 | smp_stop_cpu_function(NULL); | |
1836 | } | |
1837 | ||
a4ec1eff | 1838 | static void __init voyager_smp_prepare_cpus(unsigned int max_cpus) |
1da177e4 LT |
1839 | { |
1840 | /* FIXME: ignore max_cpus for now */ | |
1841 | smp_boot_cpus(); | |
1842 | } | |
1843 | ||
8f818210 | 1844 | static void __cpuinit voyager_smp_prepare_boot_cpu(void) |
1da177e4 | 1845 | { |
6a3ee3d5 JF |
1846 | init_gdt(smp_processor_id()); |
1847 | switch_to_new_gdt(); | |
1848 | ||
1da177e4 LT |
1849 | cpu_set(smp_processor_id(), cpu_online_map); |
1850 | cpu_set(smp_processor_id(), cpu_callout_map); | |
4ad8d383 | 1851 | cpu_set(smp_processor_id(), cpu_possible_map); |
3c101cf0 | 1852 | cpu_set(smp_processor_id(), cpu_present_map); |
1da177e4 LT |
1853 | } |
1854 | ||
a4ec1eff | 1855 | static int __cpuinit voyager_cpu_up(unsigned int cpu) |
1da177e4 LT |
1856 | { |
1857 | /* This only works at boot for x86. See "rewrite" above. */ | |
1858 | if (cpu_isset(cpu, smp_commenced_mask)) | |
1859 | return -ENOSYS; | |
1860 | ||
1861 | /* In case one didn't come up */ | |
1862 | if (!cpu_isset(cpu, cpu_callin_map)) | |
1863 | return -EIO; | |
1864 | /* Unleash the CPU! */ | |
1865 | cpu_set(cpu, smp_commenced_mask); | |
1866 | while (!cpu_isset(cpu, cpu_online_map)) | |
1867 | mb(); | |
1868 | return 0; | |
1869 | } | |
1870 | ||
a4ec1eff | 1871 | static void __init voyager_smp_cpus_done(unsigned int max_cpus) |
1da177e4 LT |
1872 | { |
1873 | zap_low_mappings(); | |
1874 | } | |
033ab7f8 | 1875 | |
a4ec1eff | 1876 | void __init smp_setup_processor_id(void) |
033ab7f8 AM |
1877 | { |
1878 | current_thread_info()->cpu = hard_smp_processor_id(); | |
6a3ee3d5 | 1879 | x86_write_percpu(cpu_number, hard_smp_processor_id()); |
033ab7f8 | 1880 | } |
6a3ee3d5 JF |
1881 | |
1882 | struct smp_ops smp_ops = { | |
1883 | .smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu, | |
1884 | .smp_prepare_cpus = voyager_smp_prepare_cpus, | |
1885 | .cpu_up = voyager_cpu_up, | |
1886 | .smp_cpus_done = voyager_smp_cpus_done, | |
1887 | ||
1888 | .smp_send_stop = voyager_smp_send_stop, | |
1889 | .smp_send_reschedule = voyager_smp_send_reschedule, | |
1890 | .smp_call_function_mask = voyager_smp_call_function_mask, | |
1891 | }; |