x86: move mp_irqs to io_apics_32.c
arch/x86/kernel/mpparse_32.c
1 /*
2 * Intel Multiprocessor Specification 1.1 and 1.4
3 * compliant MP-table parsing routines.
4 *
5 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
6 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
7 *
8 * Fixes
9 * Erich Boleyn : MP v1.4 and additional changes.
10 * Alan Cox : Added EBDA scanning
11 * Ingo Molnar : various cleanups and rewrites
12 * Maciej W. Rozycki: Bits for default MP configurations
13 * Paul Diefenbaugh: Added full ACPI support
14 */
15
16 #include <linux/mm.h>
17 #include <linux/init.h>
18 #include <linux/acpi.h>
19 #include <linux/delay.h>
20 #include <linux/bootmem.h>
21 #include <linux/kernel_stat.h>
22 #include <linux/mc146818rtc.h>
23 #include <linux/bitops.h>
24
25 #include <asm/smp.h>
26 #include <asm/acpi.h>
27 #include <asm/mtrr.h>
28 #include <asm/mpspec.h>
29 #include <asm/io_apic.h>
30 #include <asm/bios_ebda.h>
31
32 #include <mach_apic.h>
33 #include <mach_apicdef.h>
34 #include <mach_mpparse.h>
35
36 /* Have we found an MP table */
37 int smp_found_config;
38
39 /*
40 * Various Linux-internal data structures created from the
41 * MP-table.
42 */
43 #if defined (CONFIG_MCA) || defined (CONFIG_EISA)
44 int mp_bus_id_to_type [MAX_MP_BUSSES];
45 #endif
46 DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
47 int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
48 static int mp_current_pci_id;
49
50 int pic_mode;
51
52 /* Make it easy to share the UP and SMP code: */
53 #ifndef CONFIG_X86_SMP
54 unsigned int num_processors;
55 unsigned disabled_cpus __cpuinitdata;
56 #ifndef CONFIG_X86_LOCAL_APIC
57 unsigned int boot_cpu_physical_apicid = -1U;
58 #endif
59 #endif
60
61 /*
62 * Intel MP BIOS table parsing routines:
63 */
64
65
66 /*
67 * Checksum an MP configuration block.
68 */
69
70 static int __init mpf_checksum(unsigned char *mp, int len)
71 {
72 int sum = 0;
73
74 while (len--)
75 sum += *mp++;
76
77 return sum & 0xFF;
78 }
79
80 #ifdef CONFIG_X86_NUMAQ
81 /*
82 * Translation table entries have to be matched to main table entries
83 * by counter, hence the mpc_record variable; there is no less
84 * disgusting way of doing this.
85 */
86
87 static int mpc_record;
88 static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __cpuinitdata;
89 #endif
90
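/*
 * Register one MP-table processor entry: disabled CPUs are only
 * counted, the APIC id comes either from the entry itself or (on
 * NUMAQ) from the OEM translation table, the boot CPU's APIC id is
 * noted, and the CPU is handed to generic_processor_info().
 */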
91 static void __cpuinit MP_processor_info(struct mpc_config_processor *m)
92 {
93 int apicid;
94
95 if (!(m->mpc_cpuflag & CPU_ENABLED)) {
96 #ifdef CONFIG_X86_SMP
97 disabled_cpus++;
98 #endif
99 return;
100 }
101
102 #ifdef CONFIG_X86_NUMAQ
103 apicid = mpc_apic_id(m, translation_table[mpc_record]);
104 #else
105 Dprintk("Processor #%d %u:%u APIC version %d\n",
106 m->mpc_apicid,
107 (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
108 (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
109 m->mpc_apicver);
110 apicid = m->mpc_apicid;
111 #endif
112
113 if (m->mpc_featureflag&(1<<0))
114 Dprintk(" Floating point unit present.\n");
115 if (m->mpc_featureflag&(1<<7))
116 Dprintk(" Machine Exception supported.\n");
117 if (m->mpc_featureflag&(1<<8))
118 Dprintk(" 64 bit compare & exchange supported.\n");
119 if (m->mpc_featureflag&(1<<9))
120 Dprintk(" Internal APIC present.\n");
121 if (m->mpc_featureflag&(1<<11))
122 Dprintk(" SEP present.\n");
123 if (m->mpc_featureflag&(1<<12))
124 Dprintk(" MTRR present.\n");
125 if (m->mpc_featureflag&(1<<13))
126 Dprintk(" PGE present.\n");
127 if (m->mpc_featureflag&(1<<14))
128 Dprintk(" MCA present.\n");
129 if (m->mpc_featureflag&(1<<15))
130 Dprintk(" CMOV present.\n");
131 if (m->mpc_featureflag&(1<<16))
132 Dprintk(" PAT present.\n");
133 if (m->mpc_featureflag&(1<<17))
134 Dprintk(" PSE present.\n");
135 if (m->mpc_featureflag&(1<<18))
136 Dprintk(" PSN present.\n");
137 if (m->mpc_featureflag&(1<<19))
138 Dprintk(" Cache Line Flush Instruction present.\n");
139 /* 20 Reserved */
140 if (m->mpc_featureflag&(1<<21))
141 Dprintk(" Debug Trace and EMON Store present.\n");
142 if (m->mpc_featureflag&(1<<22))
143 Dprintk(" ACPI Thermal Throttle Registers present.\n");
144 if (m->mpc_featureflag&(1<<23))
145 Dprintk(" MMX present.\n");
146 if (m->mpc_featureflag&(1<<24))
147 Dprintk(" FXSR present.\n");
148 if (m->mpc_featureflag&(1<<25))
149 Dprintk(" XMM present.\n");
150 if (m->mpc_featureflag&(1<<26))
151 Dprintk(" Willamette New Instructions present.\n");
152 if (m->mpc_featureflag&(1<<27))
153 Dprintk(" Self Snoop present.\n");
154 if (m->mpc_featureflag&(1<<28))
155 Dprintk(" HT present.\n");
156 if (m->mpc_featureflag&(1<<29))
157 Dprintk(" Thermal Monitor present.\n");
158 /* 30, 31 Reserved */
159
160
161 if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
162 Dprintk(" Bootup CPU\n");
163 boot_cpu_physical_apicid = m->mpc_apicid;
164 }
165
166 generic_processor_info(apicid, m->mpc_apicver);
167 }
168
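/*
 * Register one MP-table bus entry: non-PCI buses are marked in
 * mp_bus_not_pci, PCI buses get a sequential id in
 * mp_bus_id_to_pci_bus, and EISA/MCA kernels also record the bus
 * type in mp_bus_id_to_type.
 */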
169 static void __init MP_bus_info (struct mpc_config_bus *m)
170 {
171 char str[7];
172
173 memcpy(str, m->mpc_bustype, 6);
174 str[6] = 0;
175
176 #ifdef CONFIG_X86_NUMAQ
177 mpc_oem_bus_info(m, str, translation_table[mpc_record]);
178 #else
179 Dprintk("Bus #%d is %s\n", m->mpc_busid, str);
180 #endif
181
182 #if MAX_MP_BUSSES < 256
183 if (m->mpc_busid >= MAX_MP_BUSSES) {
184 printk(KERN_WARNING "MP table busid value (%d) for bustype %s "
185 " is too large, max. supported is %d\n",
186 m->mpc_busid, str, MAX_MP_BUSSES - 1);
187 return;
188 }
189 #endif
190
191 set_bit(m->mpc_busid, mp_bus_not_pci);
192 if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) {
193 #ifdef CONFIG_X86_NUMAQ
194 mpc_oem_pci_bus(m, translation_table[mpc_record]);
195 #endif
196 clear_bit(m->mpc_busid, mp_bus_not_pci);
197 mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
198 mp_current_pci_id++;
199 #if defined(CONFIG_EISA) || defined (CONFIG_MCA)
200 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
201 } else if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) {
202 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
203 } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) {
204 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
205 } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) {
206 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
207 } else {
208 printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
209 #endif
210 }
211 }
212
213 #ifdef CONFIG_X86_IO_APIC
214
215 static int bad_ioapic(unsigned long address)
216 {
217 if (nr_ioapics >= MAX_IO_APICS) {
218 printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
219 "(found %d)\n", MAX_IO_APICS, nr_ioapics);
220 panic("Recompile kernel with bigger MAX_IO_APICS!\n");
221 }
222 if (!address) {
223 printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
224 " found in table, skipping!\n");
225 return 1;
226 }
227 return 0;
228 }
229
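/*
 * Register one MP-table I/O APIC entry: usable entries that pass
 * bad_ioapic() are copied into mp_ioapics[].
 */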
230 static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
231 {
232 if (!(m->mpc_flags & MPC_APIC_USABLE))
233 return;
234
235 printk(KERN_INFO "I/O APIC #%d Version %d at 0x%X.\n",
236 m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
237
238 if (bad_ioapic(m->mpc_apicaddr))
239 return;
240
241 mp_ioapics[nr_ioapics] = *m;
242 nr_ioapics++;
243 }
244
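/*
 * Register one MP-table I/O interrupt entry: the entry is appended
 * to the mp_irqs[] array (now owned by the I/O APIC code, per the
 * change above), and the kernel panics if MAX_IRQ_SOURCES is
 * exceeded.
 */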
245 static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
246 {
247 mp_irqs [mp_irq_entries] = *m;
248 Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
249 " IRQ %02x, APIC ID %x, APIC INT %02x\n",
250 m->mpc_irqtype, m->mpc_irqflag & 3,
251 (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
252 m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
253 if (++mp_irq_entries == MAX_IRQ_SOURCES)
254 panic("Max # of irq sources exceeded!!\n");
255 }
256
257 #endif
258
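/*
 * Handle one MP-table local interrupt entry: nothing is stored,
 * the entry is only logged for debugging.
 */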
259 static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
260 {
261 Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
262 " IRQ %02x, APIC ID %x, APIC LINT %02x\n",
263 m->mpc_irqtype, m->mpc_irqflag & 3,
264 (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
265 m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
266 }
267
268 #ifdef CONFIG_X86_NUMAQ
269 static void __init MP_translation_info (struct mpc_config_translation *m)
270 {
271 printk(KERN_INFO "Translation: record %d, type %d, quad %d, global %d, local %d\n", mpc_record, m->trans_type, m->trans_quad, m->trans_global, m->trans_local);
272
273 if (mpc_record >= MAX_MPC_ENTRY)
274 printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n");
275 else
276 translation_table[mpc_record] = m; /* stash this for later */
277 if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad))
278 node_set_online(m->trans_quad);
279 }
280
281 /*
282 * Read/parse the MPC oem tables
283 */
284
285 static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, \
286 unsigned short oemsize)
287 {
288 int count = sizeof (*oemtable); /* the header size */
289 unsigned char *oemptr = ((unsigned char *)oemtable)+count;
290
291 mpc_record = 0;
292 printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n", oemtable);
293 if (memcmp(oemtable->oem_signature,MPC_OEM_SIGNATURE,4))
294 {
295 printk(KERN_WARNING "SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
296 oemtable->oem_signature[0],
297 oemtable->oem_signature[1],
298 oemtable->oem_signature[2],
299 oemtable->oem_signature[3]);
300 return;
301 }
302 if (mpf_checksum((unsigned char *)oemtable,oemtable->oem_length))
303 {
304 printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
305 return;
306 }
307 while (count < oemtable->oem_length) {
308 switch (*oemptr) {
309 case MP_TRANSLATION:
310 {
311 struct mpc_config_translation *m=
312 (struct mpc_config_translation *)oemptr;
313 MP_translation_info(m);
314 oemptr += sizeof(*m);
315 count += sizeof(*m);
316 ++mpc_record;
317 break;
318 }
319 default:
320 {
321 printk(KERN_WARNING "Unrecognised OEM table entry type! - %d\n", (int) *oemptr);
322 return;
323 }
324 }
325 }
326 }
327
328 static inline void mps_oem_check(struct mp_config_table *mpc, char *oem,
329 char *productid)
330 {
331 if (strncmp(oem, "IBM NUMA", 8))
332 printk("Warning! May not be a NUMA-Q system!\n");
333 if (mpc->mpc_oemptr)
334 smp_read_mpc_oem((struct mp_config_oemtable *) mpc->mpc_oemptr,
335 mpc->mpc_oemsize);
336 }
337 #endif /* CONFIG_X86_NUMAQ */
338
339 /*
340 * Read/parse the MPC
341 */
342
343 static int __init smp_read_mpc(struct mp_config_table *mpc)
344 {
345 char str[16];
346 char oem[10];
347 int count=sizeof(*mpc);
348 unsigned char *mpt=((unsigned char *)mpc)+count;
349
350 if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
351 printk(KERN_ERR "SMP mptable: bad signature [0x%x]!\n",
352 *(u32 *)mpc->mpc_signature);
353 return 0;
354 }
355 if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
356 printk(KERN_ERR "SMP mptable: checksum error!\n");
357 return 0;
358 }
359 if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
360 printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
361 mpc->mpc_spec);
362 return 0;
363 }
364 if (!mpc->mpc_lapic) {
365 printk(KERN_ERR "SMP mptable: null local APIC address!\n");
366 return 0;
367 }
368 memcpy(oem,mpc->mpc_oem,8);
369 oem[8]=0;
370 printk(KERN_INFO "OEM ID: %s ",oem);
371
372 memcpy(str,mpc->mpc_productid,12);
373 str[12]=0;
374 printk("Product ID: %s ",str);
375
376 mps_oem_check(mpc, oem, str);
377
378 printk("APIC at: 0x%X\n", mpc->mpc_lapic);
379
380 /*
381 * Save the local APIC address (it might be non-default) -- but only
382 * if we're not using ACPI.
383 */
384 if (!acpi_lapic)
385 mp_lapic_addr = mpc->mpc_lapic;
386
387 /*
388 * Now process the configuration blocks.
389 */
390 #ifdef CONFIG_X86_NUMAQ
391 mpc_record = 0;
392 #endif
393 while (count < mpc->mpc_length) {
394 switch(*mpt) {
395 case MP_PROCESSOR:
396 {
397 struct mpc_config_processor *m=
398 (struct mpc_config_processor *)mpt;
399 /* ACPI may have already provided this data */
400 if (!acpi_lapic)
401 MP_processor_info(m);
402 mpt += sizeof(*m);
403 count += sizeof(*m);
404 break;
405 }
406 case MP_BUS:
407 {
408 struct mpc_config_bus *m=
409 (struct mpc_config_bus *)mpt;
410 MP_bus_info(m);
411 mpt += sizeof(*m);
412 count += sizeof(*m);
413 break;
414 }
415 case MP_IOAPIC:
416 {
417 #ifdef CONFIG_X86_IO_APIC
418 struct mpc_config_ioapic *m=
419 (struct mpc_config_ioapic *)mpt;
420 MP_ioapic_info(m);
421 #endif
422 mpt+=sizeof(struct mpc_config_ioapic);
423 count+=sizeof(struct mpc_config_ioapic);
424 break;
425 }
426 case MP_INTSRC:
427 {
428 #ifdef CONFIG_X86_IO_APIC
429 struct mpc_config_intsrc *m=
430 (struct mpc_config_intsrc *)mpt;
431
432 MP_intsrc_info(m);
433 #endif
434 mpt+=sizeof(struct mpc_config_intsrc);
435 count+=sizeof(struct mpc_config_intsrc);
436 break;
437 }
438 case MP_LINTSRC:
439 {
440 struct mpc_config_lintsrc *m=
441 (struct mpc_config_lintsrc *)mpt;
442 MP_lintsrc_info(m);
443 mpt+=sizeof(*m);
444 count+=sizeof(*m);
445 break;
446 }
447 default:
448 {
449 count = mpc->mpc_length;
450 break;
451 }
452 }
453 #ifdef CONFIG_X86_NUMAQ
454 ++mpc_record;
455 #endif
456 }
457 setup_apic_routing();
458 if (!num_processors)
459 printk(KERN_ERR "SMP mptable: no processors registered!\n");
460 return num_processors;
461 }
462
463 #ifdef CONFIG_X86_IO_APIC
464
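/*
 * Read the trigger mode of an ISA IRQ from the chipset's ELCR
 * (Edge/Level Control Registers at I/O ports 0x4d0/0x4d1): one bit
 * per IRQ, set means level triggered.
 */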
465 static int __init ELCR_trigger(unsigned int irq)
466 {
467 unsigned int port;
468
469 port = 0x4d0 + (irq >> 3);
470 return (inb(port) >> (irq & 7)) & 1;
471 }
472
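/*
 * Fabricate I/O interrupt entries for a default MP configuration
 * that carries no explicit IRQ information: identity-map the legacy
 * ISA IRQs to the first I/O APIC (IRQ0 goes to INTIN2), optionally
 * taking trigger modes from the ELCR, and add an ExtINT entry for
 * the 8259A on INTIN0.
 */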
473 static void __init construct_default_ioirq_mptable(int mpc_default_type)
474 {
475 struct mpc_config_intsrc intsrc;
476 int i;
477 int ELCR_fallback = 0;
478
479 intsrc.mpc_type = MP_INTSRC;
480 intsrc.mpc_irqflag = 0; /* conforming */
481 intsrc.mpc_srcbus = 0;
482 intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
483
484 intsrc.mpc_irqtype = mp_INT;
485
486 /*
487 * If true, we have an ISA/PCI system with no IRQ entries
488 * in the MP table. To prevent the PCI interrupts from being set up
489 * incorrectly, we try to use the ELCR. The sanity check to see if
490 * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
491 * never be level sensitive, so we simply see if the ELCR agrees.
492 * If it does, we assume it's valid.
493 */
494 if (mpc_default_type == 5) {
495 printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
496
497 if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
498 printk(KERN_WARNING "ELCR contains invalid data... not using ELCR\n");
499 else {
500 printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
501 ELCR_fallback = 1;
502 }
503 }
504
505 for (i = 0; i < 16; i++) {
506 switch (mpc_default_type) {
507 case 2:
508 if (i == 0 || i == 13)
509 continue; /* IRQ0 & IRQ13 not connected */
510 /* fall through */
511 default:
512 if (i == 2)
513 continue; /* IRQ2 is never connected */
514 }
515
516 if (ELCR_fallback) {
517 /*
518 * If the ELCR indicates a level-sensitive interrupt, we
519 * copy that information over to the MP table in the
520 * irqflag field (level sensitive, active high polarity).
521 */
522 if (ELCR_trigger(i))
523 intsrc.mpc_irqflag = 13;
524 else
525 intsrc.mpc_irqflag = 0;
526 }
527
528 intsrc.mpc_srcbusirq = i;
529 intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */
530 MP_intsrc_info(&intsrc);
531 }
532
533 intsrc.mpc_irqtype = mp_ExtINT;
534 intsrc.mpc_srcbusirq = 0;
535 intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */
536 MP_intsrc_info(&intsrc);
537 }
538
539 #endif
540
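/*
 * Build an in-memory MP configuration for one of the MPS default
 * system types: two CPUs at the default local APIC address, the
 * bus type implied by the default (ISA, EISA or MCA, plus a PCI
 * bus for types > 4), one I/O APIC at 0xFEC00000 with default ISA
 * IRQ routing, and the standard ExtINT/NMI local interrupt lines.
 */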
541 static inline void __init construct_default_ISA_mptable(int mpc_default_type)
542 {
543 struct mpc_config_processor processor;
544 struct mpc_config_bus bus;
545 #ifdef CONFIG_X86_IO_APIC
546 struct mpc_config_ioapic ioapic;
547 #endif
548 struct mpc_config_lintsrc lintsrc;
549 int linttypes[2] = { mp_ExtINT, mp_NMI };
550 int i;
551
552 /*
553 * local APIC has default address
554 */
555 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
556
557 /*
558 * 2 CPUs, numbered 0 & 1.
559 */
560 processor.mpc_type = MP_PROCESSOR;
561 /* Either an integrated APIC or a discrete 82489DX. */
562 processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
563 processor.mpc_cpuflag = CPU_ENABLED;
564 processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
565 (boot_cpu_data.x86_model << 4) |
566 boot_cpu_data.x86_mask;
567 processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
568 processor.mpc_reserved[0] = 0;
569 processor.mpc_reserved[1] = 0;
570 for (i = 0; i < 2; i++) {
571 processor.mpc_apicid = i;
572 MP_processor_info(&processor);
573 }
574
575 bus.mpc_type = MP_BUS;
576 bus.mpc_busid = 0;
577 switch (mpc_default_type) {
578 default:
579 printk("???\n");
580 printk(KERN_ERR "Unknown standard configuration %d\n",
581 mpc_default_type);
582 /* fall through */
583 case 1:
584 case 5:
585 memcpy(bus.mpc_bustype, "ISA ", 6);
586 break;
587 case 2:
588 case 6:
589 case 3:
590 memcpy(bus.mpc_bustype, "EISA ", 6);
591 break;
592 case 4:
593 case 7:
594 memcpy(bus.mpc_bustype, "MCA ", 6);
595 }
596 MP_bus_info(&bus);
597 if (mpc_default_type > 4) {
598 bus.mpc_busid = 1;
599 memcpy(bus.mpc_bustype, "PCI ", 6);
600 MP_bus_info(&bus);
601 }
602
603 #ifdef CONFIG_X86_IO_APIC
604 ioapic.mpc_type = MP_IOAPIC;
605 ioapic.mpc_apicid = 2;
606 ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
607 ioapic.mpc_flags = MPC_APIC_USABLE;
608 ioapic.mpc_apicaddr = 0xFEC00000;
609 MP_ioapic_info(&ioapic);
610
611 /*
612 * We set up most of the low 16 IO-APIC pins according to MPS rules.
613 */
614 construct_default_ioirq_mptable(mpc_default_type);
615 #endif
616 lintsrc.mpc_type = MP_LINTSRC;
617 lintsrc.mpc_irqflag = 0; /* conforming */
618 lintsrc.mpc_srcbusid = 0;
619 lintsrc.mpc_srcbusirq = 0;
620 lintsrc.mpc_destapic = MP_APIC_ALL;
621 for (i = 0; i < 2; i++) {
622 lintsrc.mpc_irqtype = linttypes[i];
623 lintsrc.mpc_destapiclint = i;
624 MP_lintsrc_info(&lintsrc);
625 }
626 }
627
628 static struct intel_mp_floating *mpf_found;
629
630 /*
631 * Scan the memory blocks for an SMP configuration block.
632 */
633 void __init get_smp_config (void)
634 {
635 struct intel_mp_floating *mpf = mpf_found;
636
637 /*
638 * ACPI supports both logical (e.g. Hyper-Threading) and physical
639 * processors, where MPS only supports physical.
640 */
641 if (acpi_lapic && acpi_ioapic) {
642 printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
643 return;
644 }
645 else if (acpi_lapic)
646 printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");
647
648 printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
649 if (mpf->mpf_feature2 & (1<<7)) {
650 printk(KERN_INFO " IMCR and PIC compatibility mode.\n");
651 pic_mode = 1;
652 } else {
653 printk(KERN_INFO " Virtual Wire compatibility mode.\n");
654 pic_mode = 0;
655 }
656
657 /*
658 * Now see if we need to read further.
659 */
660 if (mpf->mpf_feature1 != 0) {
661
662 printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
663 construct_default_ISA_mptable(mpf->mpf_feature1);
664
665 } else if (mpf->mpf_physptr) {
666
667 /*
668 * Read the physical hardware table. Anything here will
669 * override the defaults.
670 */
671 if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr))) {
672 smp_found_config = 0;
673 printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
674 printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
675 return;
676 }
677
678 #ifdef CONFIG_X86_IO_APIC
679 /*
680 * If there are no explicit MP IRQ entries, then we are
681 * broken. We set up most of the low 16 IO-APIC pins to
682 * ISA defaults and hope it will work.
683 */
684 if (!mp_irq_entries) {
685 struct mpc_config_bus bus;
686
687 printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
688
689 bus.mpc_type = MP_BUS;
690 bus.mpc_busid = 0;
691 memcpy(bus.mpc_bustype, "ISA ", 6);
692 MP_bus_info(&bus);
693
694 construct_default_ioirq_mptable(0);
695 }
696 #endif
697 } else
698 BUG();
699
700 printk(KERN_INFO "Processors: %d\n", num_processors);
701 /*
702 * Only use the first configuration found.
703 */
704 }
705
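/*
 * Scan a physical memory range, in 16-byte steps, for the MP
 * floating pointer structure ("_MP_" signature, valid checksum,
 * spec revision 1.1 or 1.4).  When found, reserve it (and, with
 * some care for tables at the very top of memory, the MP config
 * table it points to) in the bootmem allocator and remember it in
 * mpf_found.
 */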
706 static int __init smp_scan_config (unsigned long base, unsigned long length)
707 {
708 unsigned long *bp = phys_to_virt(base);
709 struct intel_mp_floating *mpf;
710
711 printk(KERN_INFO "Scan SMP from %p for %ld bytes.\n", bp,length);
712 if (sizeof(*mpf) != 16)
713 printk("Error: MPF size\n");
714
715 while (length > 0) {
716 mpf = (struct intel_mp_floating *)bp;
717 if ((*bp == SMP_MAGIC_IDENT) &&
718 (mpf->mpf_length == 1) &&
719 !mpf_checksum((unsigned char *)bp, 16) &&
720 ((mpf->mpf_specification == 1)
721 || (mpf->mpf_specification == 4)) ) {
722
723 smp_found_config = 1;
724 printk(KERN_INFO "found SMP MP-table at [%p] %08lx\n",
725 mpf, virt_to_phys(mpf));
726 reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE,
727 BOOTMEM_DEFAULT);
728 if (mpf->mpf_physptr) {
729 /*
730 * We cannot access the MPC table to compute its
731 * size yet, as only the first few megabytes of
732 * memory are mapped at this point.
733 * The PC-9800 places its MPC table at the very end
734 * of physical memory, so blindly reserving PAGE_SIZE
735 * from mpf->mpf_physptr could trigger BUG() in
736 * reserve_bootmem.
737 */
738 unsigned long size = PAGE_SIZE;
739 unsigned long end = max_low_pfn * PAGE_SIZE;
740 if (mpf->mpf_physptr + size > end)
741 size = end - mpf->mpf_physptr;
742 reserve_bootmem(mpf->mpf_physptr, size,
743 BOOTMEM_DEFAULT);
744 }
745
746 mpf_found = mpf;
747 return 1;
748 }
749 bp += 4;
750 length -= 16;
751 }
752 return 0;
753 }
754
755 void __init find_smp_config (void)
756 {
757 unsigned int address;
758
759 /*
760 * FIXME: Linux assumes you have 640K of base RAM;
761 * this continues that error.
762 *
763 * 1) Scan the bottom 1K for a signature
764 * 2) Scan the top 1K of base RAM
765 * 3) Scan the 64K of BIOS
766 */
767 if (smp_scan_config(0x0,0x400) ||
768 smp_scan_config(639*0x400,0x400) ||
769 smp_scan_config(0xF0000,0x10000))
770 return;
771 /*
772 * If it is an SMP machine we should know by now, unless the
773 * configuration is in an EISA/MCA bus machine with an
774 * extended BIOS data area.
775 *
776 * There is a real-mode segmented pointer pointing to the
777 * 4K EBDA area at 0x40E; calculate and scan it here.
778 *
779 * NOTE! There are Linux loaders that will corrupt the EBDA
780 * area, and as such this kind of SMP config may be less
781 * trustworthy, simply because the SMP table may have been
782 * stomped on during early boot. These loaders are buggy and
783 * should be fixed.
784 *
785 * The MP 1.4 spec says to scan only the first 1K of the 4K EBDA.
786 */
787
788 address = get_bios_ebda();
789 if (address)
790 smp_scan_config(address, 0x400);
791 }
792
793 /* --------------------------------------------------------------------------
794 ACPI-based MP Configuration
795 -------------------------------------------------------------------------- */
796
797 #ifdef CONFIG_ACPI
798
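/*
 * Record the local APIC address reported by ACPI (MADT), map it
 * through the fixmap, and read the boot CPU's physical APIC id if
 * it is not known yet.
 */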
799 void __init mp_register_lapic_address(u64 address)
800 {
801 mp_lapic_addr = (unsigned long) address;
802
803 set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
804
805 if (boot_cpu_physical_apicid == -1U)
806 boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
807
808 Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
809 }
810
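/*
 * Register one processor reported by ACPI (MADT LAPIC entry):
 * range-check the APIC id, count disabled CPUs, and hand enabled
 * ones to generic_processor_info() with the boot CPU's APIC
 * version.
 */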
811 void __cpuinit mp_register_lapic (int id, u8 enabled)
812 {
813 if (MAX_APICS - id <= 0) {
814 printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
815 id, MAX_APICS);
816 return;
817 }
818
819 if (!enabled) {
820 #ifdef CONFIG_X86_SMP
821 ++disabled_cpus;
822 #endif
823 return;
824 }
825
826 generic_processor_info(id, GET_APIC_VERSION(apic_read(APIC_LVR)));
827 }
828
829 #ifdef CONFIG_X86_IO_APIC
830
831 #define MP_ISA_BUS 0
832 #define MP_MAX_IOAPIC_PIN 127
833
834 extern struct mp_ioapic_routing mp_ioapic_routing[MAX_IO_APICS];
835
836 static int mp_find_ioapic (int gsi)
837 {
838 int i = 0;
839
840 /* Find the IOAPIC that manages this GSI. */
841 for (i = 0; i < nr_ioapics; i++) {
842 if ((gsi >= mp_ioapic_routing[i].gsi_base)
843 && (gsi <= mp_ioapic_routing[i].gsi_end))
844 return i;
845 }
846
847 printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
848
849 return -1;
850 }
851
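/*
 * On older Intel parts without xAPIC support, the BIOS-provided
 * I/O APIC id may clash with CPU APIC ids, so let the I/O APIC
 * code pick a unique id; otherwise the firmware value is used
 * as-is.
 */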
852 static u8 uniq_ioapic_id(u8 id)
853 {
854 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
855 !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
856 return io_apic_get_unique_id(nr_ioapics, id);
857 else
858 return id;
859 }
860
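/*
 * Register an I/O APIC discovered via ACPI: map its registers
 * through the fixmap, assign a unique id, and record its GSI range
 * in mp_ioapic_routing[] for later gsi -> (apic, pin) lookups.
 */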
861 void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
862 {
863 int idx = 0;
864
865 if (bad_ioapic(address))
866 return;
867
868 idx = nr_ioapics;
869
870 mp_ioapics[idx].mpc_type = MP_IOAPIC;
871 mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
872 mp_ioapics[idx].mpc_apicaddr = address;
873
874 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
875 mp_ioapics[idx].mpc_apicid = uniq_ioapic_id(id);
876 mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
877
878 /*
879 * Build basic GSI lookup table to facilitate gsi->io_apic lookups
880 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
881 */
882 mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
883 mp_ioapic_routing[idx].gsi_base = gsi_base;
884 mp_ioapic_routing[idx].gsi_end = gsi_base +
885 io_apic_get_redir_entries(idx);
886
887 printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
888 "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
889 mp_ioapics[idx].mpc_apicver,
890 mp_ioapics[idx].mpc_apicaddr,
891 mp_ioapic_routing[idx].gsi_base,
892 mp_ioapic_routing[idx].gsi_end);
893
894 nr_ioapics++;
895 }
896
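/*
 * Apply an ACPI (MADT) interrupt source override for a legacy ISA
 * IRQ: translate the GSI to an I/O APIC pin and append a matching
 * entry to mp_irqs[].  A bogus level trigger on the timer override
 * (IRQ0) is forced back to edge.
 */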
897 void __init
898 mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
899 {
900 struct mpc_config_intsrc intsrc;
901 int ioapic = -1;
902 int pin = -1;
903
904 /*
905 * Convert 'gsi' to 'ioapic.pin'.
906 */
907 ioapic = mp_find_ioapic(gsi);
908 if (ioapic < 0)
909 return;
910 pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
911
912 /*
913 * TBD: This check is for faulty timer entries, where the override
914 * erroneously sets the trigger to level, resulting in a HUGE
915 * increase of timer interrupts!
916 */
917 if ((bus_irq == 0) && (trigger == 3))
918 trigger = 1;
919
920 intsrc.mpc_type = MP_INTSRC;
921 intsrc.mpc_irqtype = mp_INT;
922 intsrc.mpc_irqflag = (trigger << 2) | polarity;
923 intsrc.mpc_srcbus = MP_ISA_BUS;
924 intsrc.mpc_srcbusirq = bus_irq; /* IRQ */
925 intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */
926 intsrc.mpc_dstirq = pin; /* INTIN# */
927
928 Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
929 intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
930 (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
931 intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
932
933 mp_irqs[mp_irq_entries] = intsrc;
934 if (++mp_irq_entries == MAX_IRQ_SOURCES)
935 panic("Max # of irq sources exceeded!\n");
936 }
937
938 int es7000_plat;
939
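/*
 * Set up identity mappings for the legacy ISA IRQs (0-15) on the
 * I/O APIC that handles GSI 0, skipping any IRQ or pin already
 * claimed by an MADT interrupt source override.
 */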
940 void __init mp_config_acpi_legacy_irqs (void)
941 {
942 struct mpc_config_intsrc intsrc;
943 int i = 0;
944 int ioapic = -1;
945
946 #if defined (CONFIG_MCA) || defined (CONFIG_EISA)
947 /*
948 * Fabricate the legacy ISA bus (bus #31).
949 */
950 mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
951 #endif
952 set_bit(MP_ISA_BUS, mp_bus_not_pci);
953 Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);
954
955 /*
956 * Older generations of ES7000 have no legacy identity mappings
957 */
958 if (es7000_plat == 1)
959 return;
960
961 /*
962 * Locate the IOAPIC that manages the ISA IRQs (0-15).
963 */
964 ioapic = mp_find_ioapic(0);
965 if (ioapic < 0)
966 return;
967
968 intsrc.mpc_type = MP_INTSRC;
969 intsrc.mpc_irqflag = 0; /* Conforming */
970 intsrc.mpc_srcbus = MP_ISA_BUS;
971 #ifdef CONFIG_X86_IO_APIC
972 intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
973 #endif
974 /*
975 * Use the default configuration for IRQs 0-15, unless
976 * overridden by (MADT) interrupt source override entries.
977 */
978 for (i = 0; i < 16; i++) {
979 int idx;
980
981 for (idx = 0; idx < mp_irq_entries; idx++) {
982 struct mpc_config_intsrc *irq = mp_irqs + idx;
983
984 /* Do we already have a mapping for this ISA IRQ? */
985 if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
986 break;
987
988 /* Do we already have a mapping for this IOAPIC pin */
989 if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
990 (irq->mpc_dstirq == i))
991 break;
992 }
993
994 if (idx != mp_irq_entries) {
995 printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
996 continue; /* IRQ already used */
997 }
998
999 intsrc.mpc_irqtype = mp_INT;
1000 intsrc.mpc_srcbusirq = i; /* Identity mapped */
1001 intsrc.mpc_dstirq = i;
1002
1003 Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
1004 "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
1005 (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
1006 intsrc.mpc_srcbusirq, intsrc.mpc_dstapic,
1007 intsrc.mpc_dstirq);
1008
1009 mp_irqs[mp_irq_entries] = intsrc;
1010 if (++mp_irq_entries == MAX_IRQ_SOURCES)
1011 panic("Max # of irq sources exceeded!\n");
1012 }
1013 }
1014
1015 #define MAX_GSI_NUM 4096
1016 #define IRQ_COMPRESSION_START 64
1017
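/*
 * Register a GSI that ACPI wants routed through an I/O APIC.
 * Redundant pin programming is avoided via the pin_programmed
 * bitmap, and level-triggered GSIs >= 64 are compressed into a
 * contiguous IRQ range starting at IRQ_COMPRESSION_START so that
 * unused I/O APIC pins do not exhaust the IRQ space.
 */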
1018 int mp_register_gsi(u32 gsi, int triggering, int polarity)
1019 {
1020 int ioapic = -1;
1021 int ioapic_pin = 0;
1022 int idx, bit = 0;
1023 static int pci_irq = IRQ_COMPRESSION_START;
1024 /*
1025 * Mapping between Global System Interrupts, which
1026 * represent all possible interrupts, and IRQs
1027 * assigned to actual devices.
1028 */
1029 static int gsi_to_irq[MAX_GSI_NUM];
1030
1031 /* Don't set up the ACPI SCI because it's already set up */
1032 if (acpi_gbl_FADT.sci_interrupt == gsi)
1033 return gsi;
1034
1035 ioapic = mp_find_ioapic(gsi);
1036 if (ioapic < 0) {
1037 printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
1038 return gsi;
1039 }
1040
1041 ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
1042
1043 if (ioapic_renumber_irq)
1044 gsi = ioapic_renumber_irq(ioapic, gsi);
1045
1046 /*
1047 * Avoid pin reprogramming. PRTs typically include entries
1048 * with redundant pin->gsi mappings (but unique PCI devices);
1049 * we only program the IOAPIC on the first.
1050 */
1051 bit = ioapic_pin % 32;
1052 idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
1053 if (idx > 3) {
1054 printk(KERN_ERR "Invalid reference to IOAPIC pin "
1055 "%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
1056 ioapic_pin);
1057 return gsi;
1058 }
1059 if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
1060 Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
1061 mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
1062 return (gsi < IRQ_COMPRESSION_START ? gsi : gsi_to_irq[gsi]);
1063 }
1064
1065 mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
1066
1067 /*
1068 * For GSI >= 64, use IRQ compression
1069 */
1070 if ((gsi >= IRQ_COMPRESSION_START)
1071 && (triggering == ACPI_LEVEL_SENSITIVE)) {
1072 /*
1073 * For PCI devices assign IRQs in order, avoiding gaps
1074 * due to unused I/O APIC pins.
1075 */
1076 int irq = gsi;
1077 if (gsi < MAX_GSI_NUM) {
1078 /*
1079 * Retain the VIA chipset work-around (gsi > 15), but
1080 * avoid a problem where the 8254 timer (IRQ0) is set up
1081 * via an override (so it's not on pin 0 of the ioapic),
1082 * and at the same time, the pin 0 interrupt is a PCI
1083 * type. The gsi > 15 test could cause these two pins
1084 * to be shared as IRQ0, and they are not shareable.
1085 * So test for this condition, and if necessary, avoid
1086 * the pin collision.
1087 */
1088 gsi = pci_irq++;
1089 /*
1090 * Don't assign IRQ used by ACPI SCI
1091 */
1092 if (gsi == acpi_gbl_FADT.sci_interrupt)
1093 gsi = pci_irq++;
1094 gsi_to_irq[irq] = gsi;
1095 } else {
1096 printk(KERN_ERR "GSI %u is too high\n", gsi);
1097 return gsi;
1098 }
1099 }
1100
1101 io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
1102 triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
1103 polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
1104 return gsi;
1105 }
1106
1107 #endif /* CONFIG_X86_IO_APIC */
1108 #endif /* CONFIG_ACPI */