[PATCH] i386: Handle non-existing APICs without panicking
[deliverable/linux.git] arch/i386/kernel/mpparse.c
1/*
2 * Intel Multiprocessor Specification 1.1 and 1.4
3 * compliant MP-table parsing routines.
4 *
5 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
6 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
7 *
8 * Fixes
9 * Erich Boleyn : MP v1.4 and additional changes.
10 * Alan Cox : Added EBDA scanning
11 * Ingo Molnar : various cleanups and rewrites
12 * Maciej W. Rozycki: Bits for default MP configurations
13 * Paul Diefenbaugh: Added full ACPI support
14 */
15
16#include <linux/mm.h>
17#include <linux/init.h>
18#include <linux/acpi.h>
19#include <linux/delay.h>
20#include <linux/config.h>
21#include <linux/bootmem.h>
22#include <linux/smp_lock.h>
23#include <linux/kernel_stat.h>
24#include <linux/mc146818rtc.h>
25#include <linux/bitops.h>
26
27#include <asm/smp.h>
28#include <asm/acpi.h>
29#include <asm/mtrr.h>
30#include <asm/mpspec.h>
31#include <asm/io_apic.h>
32
33#include <mach_apic.h>
34#include <mach_mpparse.h>
35#include <bios_ebda.h>
36
37/* Have we found an MP table */
38int smp_found_config;
39unsigned int __initdata maxcpus = NR_CPUS;
40
41#ifdef CONFIG_HOTPLUG_CPU
42#define CPU_HOTPLUG_ENABLED (1)
43#else
44#define CPU_HOTPLUG_ENABLED (0)
45#endif
46
47/*
48 * Various Linux-internal data structures created from the
49 * MP-table.
50 */
51int apic_version [MAX_APICS];
52int mp_bus_id_to_type [MAX_MP_BUSSES];
53int mp_bus_id_to_node [MAX_MP_BUSSES];
54int mp_bus_id_to_local [MAX_MP_BUSSES];
55int quad_local_to_mp_bus_id [NR_CPUS/4][4];
56int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
57static int mp_current_pci_id;
58
59/* I/O APIC entries */
60struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
61
62/* # of MP IRQ source entries */
63struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
64
65/* MP IRQ source entries */
66int mp_irq_entries;
67
68int nr_ioapics;
69
70int pic_mode;
71unsigned long mp_lapic_addr;
72
73unsigned int def_to_bigsmp = 0;
74
75/* Processor that is doing the boot up */
76unsigned int boot_cpu_physical_apicid = -1U;
77/* Internal processor count */
78static unsigned int __devinitdata num_processors;
79
80/* Bitmask of physically existing CPUs */
81physid_mask_t phys_cpu_present_map;
82
83u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
84
85/*
86 * Intel MP BIOS table parsing routines:
87 */
88
89
90/*
91 * Checksum an MP configuration block: the bytes of a valid block sum to zero modulo 256, so a non-zero result means corruption.
92 */
93
94static int __init mpf_checksum(unsigned char *mp, int len)
95{
96 int sum = 0;
97
98 while (len--)
99 sum += *mp++;
100
101 return sum & 0xFF;
102}
103
104/*
105 * Have to match translation table entries to main table entries by counter
106 * hence the mpc_record variable .... can't see a less disgusting way of
107 * doing this ....
108 */
109
110static int mpc_record;
111static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __initdata;
112
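/*
 * Sanity-check an APIC ID before a processor entry is accepted.  On
 * NUMA-Q the low nibble is a one-hot local APIC number and the high
 * nibble is the quad number (0xf is reserved); elsewhere 8-bit IDs
 * are only accepted from APIC version 0x14 up, older parts are
 * limited to IDs below 0xf.
 */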
113#ifdef CONFIG_X86_NUMAQ
114static int MP_valid_apicid(int apicid, int version)
115{
116 return hweight_long(apicid & 0xf) == 1 && (apicid >> 4) != 0xf;
117}
118#else
119static int MP_valid_apicid(int apicid, int version)
120{
121 if (version >= 0x14)
122 return apicid < 0xff;
123 else
124 return apicid < 0xf;
125}
126#endif
127
128static void __devinit MP_processor_info (struct mpc_config_processor *m)
129{
130 int ver, apicid;
131 physid_mask_t phys_cpu;
132
133 if (!(m->mpc_cpuflag & CPU_ENABLED))
134 return;
135
136 apicid = mpc_apic_id(m, translation_table[mpc_record]);
137
138 if (m->mpc_featureflag&(1<<0))
139 Dprintk(" Floating point unit present.\n");
140 if (m->mpc_featureflag&(1<<7))
141 Dprintk(" Machine Exception supported.\n");
142 if (m->mpc_featureflag&(1<<8))
143 Dprintk(" 64 bit compare & exchange supported.\n");
144 if (m->mpc_featureflag&(1<<9))
145 Dprintk(" Internal APIC present.\n");
146 if (m->mpc_featureflag&(1<<11))
147 Dprintk(" SEP present.\n");
148 if (m->mpc_featureflag&(1<<12))
149 Dprintk(" MTRR present.\n");
150 if (m->mpc_featureflag&(1<<13))
151 Dprintk(" PGE present.\n");
152 if (m->mpc_featureflag&(1<<14))
153 Dprintk(" MCA present.\n");
154 if (m->mpc_featureflag&(1<<15))
155 Dprintk(" CMOV present.\n");
156 if (m->mpc_featureflag&(1<<16))
157 Dprintk(" PAT present.\n");
158 if (m->mpc_featureflag&(1<<17))
159 Dprintk(" PSE present.\n");
160 if (m->mpc_featureflag&(1<<18))
161 Dprintk(" PSN present.\n");
162 if (m->mpc_featureflag&(1<<19))
163 Dprintk(" Cache Line Flush Instruction present.\n");
164 /* 20 Reserved */
165 if (m->mpc_featureflag&(1<<21))
166 Dprintk(" Debug Trace and EMON Store present.\n");
167 if (m->mpc_featureflag&(1<<22))
168 Dprintk(" ACPI Thermal Throttle Registers present.\n");
169 if (m->mpc_featureflag&(1<<23))
170 Dprintk(" MMX present.\n");
171 if (m->mpc_featureflag&(1<<24))
172 Dprintk(" FXSR present.\n");
173 if (m->mpc_featureflag&(1<<25))
174 Dprintk(" XMM present.\n");
175 if (m->mpc_featureflag&(1<<26))
176 Dprintk(" Willamette New Instructions present.\n");
177 if (m->mpc_featureflag&(1<<27))
178 Dprintk(" Self Snoop present.\n");
179 if (m->mpc_featureflag&(1<<28))
180 Dprintk(" HT present.\n");
181 if (m->mpc_featureflag&(1<<29))
182 Dprintk(" Thermal Monitor present.\n");
183 /* 30, 31 Reserved */
184
185
186 if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
187 Dprintk(" Bootup CPU\n");
188 boot_cpu_physical_apicid = m->mpc_apicid;
189 }
190
191 ver = m->mpc_apicver;
192
193 if (!MP_valid_apicid(apicid, ver)) {
194 printk(KERN_WARNING "Processor #%d INVALID. (Max ID: %d).\n",
195 m->mpc_apicid, MAX_APICS);
196 return;
197 }
198
199 /*
200 * Validate version
201 */
202 if (ver == 0x0) {
203 printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! "
204 "fixing up to 0x10. (tell your hw vendor)\n",
205 m->mpc_apicid);
206 ver = 0x10;
207 }
208 apic_version[m->mpc_apicid] = ver;
209
210 phys_cpu = apicid_to_cpu_present(apicid);
211 physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu);
212
213 if (num_processors >= NR_CPUS) {
214 printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
215 " Processor ignored.\n", NR_CPUS);
216 return;
217 }
218
219 if (num_processors >= maxcpus) {
220 printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
221 " Processor ignored.\n", maxcpus);
222 return;
223 }
224
225 cpu_set(num_processors, cpu_possible_map);
226 num_processors++;
227
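	/*
	 * With CPU hotplug enabled, or with more than eight processors,
	 * the default (flat) APIC mode cannot address every CPU: fall
	 * back to bigsmp unless this is an Intel part without xAPIC.
	 */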
228 if (CPU_HOTPLUG_ENABLED || (num_processors > 8)) {
229 switch (boot_cpu_data.x86_vendor) {
230 case X86_VENDOR_INTEL:
231 if (!APIC_XAPIC(ver)) {
232 def_to_bigsmp = 0;
233 break;
234 }
235 /* If P4 and above fall through */
236 case X86_VENDOR_AMD:
237 def_to_bigsmp = 1;
238 }
239 }
240 bios_cpu_apicid[num_processors - 1] = m->mpc_apicid;
241}
242
243static void __init MP_bus_info (struct mpc_config_bus *m)
244{
245 char str[7];
246
247 memcpy(str, m->mpc_bustype, 6);
248 str[6] = 0;
249
250 mpc_oem_bus_info(m, str, translation_table[mpc_record]);
251
252 if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) {
253 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
254 } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) {
255 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
256 } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) {
257 mpc_oem_pci_bus(m, translation_table[mpc_record]);
258 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
259 mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
260 mp_current_pci_id++;
261 } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) {
262 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
263 } else if (strncmp(str, BUSTYPE_NEC98, sizeof(BUSTYPE_NEC98)-1) == 0) {
264 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_NEC98;
265 } else {
266 printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
267 }
268}
269
270static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
271{
272 if (!(m->mpc_flags & MPC_APIC_USABLE))
273 return;
274
275 printk(KERN_INFO "I/O APIC #%d Version %d at 0x%lX.\n",
276 m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
277 if (nr_ioapics >= MAX_IO_APICS) {
278 printk(KERN_CRIT "Max # of I/O APICs (%d) exceeded (found %d).\n",
279 MAX_IO_APICS, nr_ioapics);
280 panic("Recompile kernel with bigger MAX_IO_APICS!.\n");
281 }
282 if (!m->mpc_apicaddr) {
283 printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
284 " found in MP table, skipping!\n");
285 return;
286 }
287 mp_ioapics[nr_ioapics] = *m;
288 nr_ioapics++;
289}
290
291static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
292{
293 mp_irqs [mp_irq_entries] = *m;
294 Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
295 " IRQ %02x, APIC ID %x, APIC INT %02x\n",
296 m->mpc_irqtype, m->mpc_irqflag & 3,
297 (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
298 m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
299 if (++mp_irq_entries == MAX_IRQ_SOURCES)
300 panic("Max # of irq sources exceeded!!\n");
301}
302
303static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
304{
305 Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
306 " IRQ %02x, APIC ID %x, APIC LINT %02x\n",
307 m->mpc_irqtype, m->mpc_irqflag & 3,
308 (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
309 m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
310 /*
311 * Well it seems all SMP boards in existence
312 * use ExtINT/LVT1 == LINT0 and
313 * NMI/LVT2 == LINT1 - the following check
314 * will show us if this assumption is false.
315 * Until then we do not have to add baggage.
316 */
317 if ((m->mpc_irqtype == mp_ExtINT) &&
318 (m->mpc_destapiclint != 0))
319 BUG();
320 if ((m->mpc_irqtype == mp_NMI) &&
321 (m->mpc_destapiclint != 1))
322 BUG();
323}
324
325#ifdef CONFIG_X86_NUMAQ
326static void __init MP_translation_info (struct mpc_config_translation *m)
327{
328 printk(KERN_INFO "Translation: record %d, type %d, quad %d, global %d, local %d\n", mpc_record, m->trans_type, m->trans_quad, m->trans_global, m->trans_local);
329
330 if (mpc_record >= MAX_MPC_ENTRY)
331 printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n");
332 else
333 translation_table[mpc_record] = m; /* stash this for later */
334 if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad))
335 node_set_online(m->trans_quad);
336}
337
338/*
339 * Read/parse the MPC oem tables
340 */
341
342static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, \
343 unsigned short oemsize)
344{
345 int count = sizeof (*oemtable); /* the header size */
346 unsigned char *oemptr = ((unsigned char *)oemtable)+count;
347
348 mpc_record = 0;
349 printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n", oemtable);
350 if (memcmp(oemtable->oem_signature,MPC_OEM_SIGNATURE,4))
351 {
352 printk(KERN_WARNING "SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
353 oemtable->oem_signature[0],
354 oemtable->oem_signature[1],
355 oemtable->oem_signature[2],
356 oemtable->oem_signature[3]);
357 return;
358 }
359 if (mpf_checksum((unsigned char *)oemtable,oemtable->oem_length))
360 {
361 printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
362 return;
363 }
364 while (count < oemtable->oem_length) {
365 switch (*oemptr) {
366 case MP_TRANSLATION:
367 {
368 struct mpc_config_translation *m=
369 (struct mpc_config_translation *)oemptr;
370 MP_translation_info(m);
371 oemptr += sizeof(*m);
372 count += sizeof(*m);
373 ++mpc_record;
374 break;
375 }
376 default:
377 {
378 printk(KERN_WARNING "Unrecognised OEM table entry type! - %d\n", (int) *oemptr);
379 return;
380 }
381 }
382 }
383}
384
385static inline void mps_oem_check(struct mp_config_table *mpc, char *oem,
386 char *productid)
387{
388 if (strncmp(oem, "IBM NUMA", 8))
389 printk("Warning! May not be a NUMA-Q system!\n");
390 if (mpc->mpc_oemptr)
391 smp_read_mpc_oem((struct mp_config_oemtable *) mpc->mpc_oemptr,
392 mpc->mpc_oemsize);
393}
394#endif /* CONFIG_X86_NUMAQ */
395
396/*
397 * Read/parse the MPC
398 */
399
400static int __init smp_read_mpc(struct mp_config_table *mpc)
401{
402 char str[16];
403 char oem[10];
404 int count=sizeof(*mpc);
405 unsigned char *mpt=((unsigned char *)mpc)+count;
406
407 if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
408 printk(KERN_ERR "SMP mptable: bad signature [0x%x]!\n",
409 *(u32 *)mpc->mpc_signature);
410 return 0;
411 }
412 if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
413 printk(KERN_ERR "SMP mptable: checksum error!\n");
414 return 0;
415 }
416 if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
417 printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
418 mpc->mpc_spec);
419 return 0;
420 }
421 if (!mpc->mpc_lapic) {
422 printk(KERN_ERR "SMP mptable: null local APIC address!\n");
423 return 0;
424 }
425 memcpy(oem,mpc->mpc_oem,8);
426 oem[8]=0;
427 printk(KERN_INFO "OEM ID: %s ",oem);
428
429 memcpy(str,mpc->mpc_productid,12);
430 str[12]=0;
431 printk("Product ID: %s ",str);
432
433 mps_oem_check(mpc, oem, str);
434
435 printk("APIC at: 0x%lX\n",mpc->mpc_lapic);
436
437 /*
438 * Save the local APIC address (it might be non-default) -- but only
439 * if we're not using ACPI.
440 */
441 if (!acpi_lapic)
442 mp_lapic_addr = mpc->mpc_lapic;
443
444 /*
445 * Now process the configuration blocks.
446 */
447 mpc_record = 0;
448 while (count < mpc->mpc_length) {
449 switch(*mpt) {
450 case MP_PROCESSOR:
451 {
452 struct mpc_config_processor *m=
453 (struct mpc_config_processor *)mpt;
454 /* ACPI may have already provided this data */
455 if (!acpi_lapic)
456 MP_processor_info(m);
457 mpt += sizeof(*m);
458 count += sizeof(*m);
459 break;
460 }
461 case MP_BUS:
462 {
463 struct mpc_config_bus *m=
464 (struct mpc_config_bus *)mpt;
465 MP_bus_info(m);
466 mpt += sizeof(*m);
467 count += sizeof(*m);
468 break;
469 }
470 case MP_IOAPIC:
471 {
472 struct mpc_config_ioapic *m=
473 (struct mpc_config_ioapic *)mpt;
474 MP_ioapic_info(m);
475 mpt+=sizeof(*m);
476 count+=sizeof(*m);
477 break;
478 }
479 case MP_INTSRC:
480 {
481 struct mpc_config_intsrc *m=
482 (struct mpc_config_intsrc *)mpt;
483
484 MP_intsrc_info(m);
485 mpt+=sizeof(*m);
486 count+=sizeof(*m);
487 break;
488 }
489 case MP_LINTSRC:
490 {
491 struct mpc_config_lintsrc *m=
492 (struct mpc_config_lintsrc *)mpt;
493 MP_lintsrc_info(m);
494 mpt+=sizeof(*m);
495 count+=sizeof(*m);
496 break;
497 }
498 default:
499 {
500 count = mpc->mpc_length;
501 break;
502 }
503 }
504 ++mpc_record;
505 }
506 clustered_apic_check();
507 if (!num_processors)
508 printk(KERN_ERR "SMP mptable: no processors registered!\n");
509 return num_processors;
510}
511
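/*
 * The ELCR (Edge/Level Control Register) of the two 8259As lives at
 * I/O ports 0x4d0/0x4d1, one bit per ISA IRQ; a set bit means the
 * line is level triggered.
 */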
512static int __init ELCR_trigger(unsigned int irq)
513{
514 unsigned int port;
515
516 port = 0x4d0 + (irq >> 3);
517 return (inb(port) >> (irq & 7)) & 1;
518}
519
520static void __init construct_default_ioirq_mptable(int mpc_default_type)
521{
522 struct mpc_config_intsrc intsrc;
523 int i;
524 int ELCR_fallback = 0;
525
526 intsrc.mpc_type = MP_INTSRC;
527 intsrc.mpc_irqflag = 0; /* conforming */
528 intsrc.mpc_srcbus = 0;
529 intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
530
531 intsrc.mpc_irqtype = mp_INT;
532
533 /*
534 * If true, we have an ISA/PCI system with no IRQ entries
535 * in the MP table. To prevent the PCI interrupts from being set up
536 * incorrectly, we try to use the ELCR. The sanity check to see if
537 * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
538 * never be level sensitive, so we simply see if the ELCR agrees.
539 * If it does, we assume it's valid.
540 */
541 if (mpc_default_type == 5) {
542 printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
543
544 if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
545 printk(KERN_WARNING "ELCR contains invalid data... not using ELCR\n");
546 else {
547 printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
548 ELCR_fallback = 1;
549 }
550 }
551
552 for (i = 0; i < 16; i++) {
553 switch (mpc_default_type) {
554 case 2:
555 if (i == 0 || i == 13)
556 continue; /* IRQ0 & IRQ13 not connected */
557 /* fall through */
558 default:
559 if (i == 2)
560 continue; /* IRQ2 is never connected */
561 }
562
563 if (ELCR_fallback) {
564 /*
565 * If the ELCR indicates a level-sensitive interrupt, we
566 * copy that information over to the MP table in the
567 * irqflag field (level sensitive, active high polarity).
568 */
569 if (ELCR_trigger(i))
570 intsrc.mpc_irqflag = 13;
571 else
572 intsrc.mpc_irqflag = 0;
573 }
574
575 intsrc.mpc_srcbusirq = i;
576 intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */
577 MP_intsrc_info(&intsrc);
578 }
579
580 intsrc.mpc_irqtype = mp_ExtINT;
581 intsrc.mpc_srcbusirq = 0;
582 intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */
583 MP_intsrc_info(&intsrc);
584}
585
586static inline void __init construct_default_ISA_mptable(int mpc_default_type)
587{
588 struct mpc_config_processor processor;
589 struct mpc_config_bus bus;
590 struct mpc_config_ioapic ioapic;
591 struct mpc_config_lintsrc lintsrc;
592 int linttypes[2] = { mp_ExtINT, mp_NMI };
593 int i;
594
595 /*
596 * local APIC has default address
597 */
598 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
599
600 /*
601 * 2 CPUs, numbered 0 & 1.
602 */
603 processor.mpc_type = MP_PROCESSOR;
604 /* Either an integrated APIC or a discrete 82489DX. */
605 processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
606 processor.mpc_cpuflag = CPU_ENABLED;
607 processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
608 (boot_cpu_data.x86_model << 4) |
609 boot_cpu_data.x86_mask;
610 processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
611 processor.mpc_reserved[0] = 0;
612 processor.mpc_reserved[1] = 0;
613 for (i = 0; i < 2; i++) {
614 processor.mpc_apicid = i;
615 MP_processor_info(&processor);
616 }
617
618 bus.mpc_type = MP_BUS;
619 bus.mpc_busid = 0;
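	/*
	 * MP spec default configurations: types 1 and 5 are ISA, 2, 3 and
	 * 6 are EISA, 4 and 7 are MCA; types above 4 additionally carry a
	 * PCI bus and an integrated (version 0x10) local APIC.
	 */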
620 switch (mpc_default_type) {
621 default:
622 printk("???\n");
623 printk(KERN_ERR "Unknown standard configuration %d\n",
624 mpc_default_type);
625 /* fall through */
626 case 1:
627 case 5:
628 memcpy(bus.mpc_bustype, "ISA ", 6);
629 break;
630 case 2:
631 case 6:
632 case 3:
633 memcpy(bus.mpc_bustype, "EISA ", 6);
634 break;
635 case 4:
636 case 7:
637 memcpy(bus.mpc_bustype, "MCA ", 6);
638 }
639 MP_bus_info(&bus);
640 if (mpc_default_type > 4) {
641 bus.mpc_busid = 1;
642 memcpy(bus.mpc_bustype, "PCI ", 6);
643 MP_bus_info(&bus);
644 }
645
646 ioapic.mpc_type = MP_IOAPIC;
647 ioapic.mpc_apicid = 2;
648 ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
649 ioapic.mpc_flags = MPC_APIC_USABLE;
650 ioapic.mpc_apicaddr = 0xFEC00000;
651 MP_ioapic_info(&ioapic);
652
653 /*
654 * We set up most of the low 16 IO-APIC pins according to MPS rules.
655 */
656 construct_default_ioirq_mptable(mpc_default_type);
657
658 lintsrc.mpc_type = MP_LINTSRC;
659 lintsrc.mpc_irqflag = 0; /* conforming */
660 lintsrc.mpc_srcbusid = 0;
661 lintsrc.mpc_srcbusirq = 0;
662 lintsrc.mpc_destapic = MP_APIC_ALL;
663 for (i = 0; i < 2; i++) {
664 lintsrc.mpc_irqtype = linttypes[i];
665 lintsrc.mpc_destapiclint = i;
666 MP_lintsrc_info(&lintsrc);
667 }
668}
669
670static struct intel_mp_floating *mpf_found;
671
672/*
673 * Scan the memory blocks for an SMP configuration block.
674 */
675void __init get_smp_config (void)
676{
677 struct intel_mp_floating *mpf = mpf_found;
678
679 /*
680 * ACPI supports both logical (e.g. Hyper-Threading) and physical
681 * processors, where MPS only supports physical.
682 */
683 if (acpi_lapic && acpi_ioapic) {
684 printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
685 return;
686 }
687 else if (acpi_lapic)
688 printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");
689
690 printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
691 if (mpf->mpf_feature2 & (1<<7)) {
692 printk(KERN_INFO " IMCR and PIC compatibility mode.\n");
693 pic_mode = 1;
694 } else {
695 printk(KERN_INFO " Virtual Wire compatibility mode.\n");
696 pic_mode = 0;
697 }
698
699 /*
700 * Now see if we need to read further.
701 */
702 if (mpf->mpf_feature1 != 0) {
703
704 printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
705 construct_default_ISA_mptable(mpf->mpf_feature1);
706
707 } else if (mpf->mpf_physptr) {
708
709 /*
710 * Read the physical hardware table. Anything here will
711 * override the defaults.
712 */
713 if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr))) {
714 smp_found_config = 0;
715 printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
716 printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
717 return;
718 }
719 /*
720 * If there are no explicit MP IRQ entries, then we are
721 * broken. We set up most of the low 16 IO-APIC pins to
722 * ISA defaults and hope it will work.
723 */
724 if (!mp_irq_entries) {
725 struct mpc_config_bus bus;
726
727 printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
728
729 bus.mpc_type = MP_BUS;
730 bus.mpc_busid = 0;
731 memcpy(bus.mpc_bustype, "ISA ", 6);
732 MP_bus_info(&bus);
733
734 construct_default_ioirq_mptable(0);
735 }
736
737 } else
738 BUG();
739
740 printk(KERN_INFO "Processors: %d\n", num_processors);
741 /*
742 * Only use the first configuration found.
743 */
744}
745
746static int __init smp_scan_config (unsigned long base, unsigned long length)
747{
748 unsigned long *bp = phys_to_virt(base);
749 struct intel_mp_floating *mpf;
750
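	/*
	 * Look for the MP floating pointer structure: a 16-byte block on a
	 * 16-byte boundary, starting with the "_MP_" signature and with a
	 * byte sum of zero modulo 256 (hence the bp += 4 stride below).
	 */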
751 Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
752 if (sizeof(*mpf) != 16)
753 printk("Error: MPF size\n");
754
755 while (length > 0) {
756 mpf = (struct intel_mp_floating *)bp;
757 if ((*bp == SMP_MAGIC_IDENT) &&
758 (mpf->mpf_length == 1) &&
759 !mpf_checksum((unsigned char *)bp, 16) &&
760 ((mpf->mpf_specification == 1)
761 || (mpf->mpf_specification == 4)) ) {
762
763 smp_found_config = 1;
764 printk(KERN_INFO "found SMP MP-table at %08lx\n",
765 virt_to_phys(mpf));
766 reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE);
767 if (mpf->mpf_physptr) {
768 /*
769 * We cannot access the MPC table to compute
770 * its size yet, as only the first few
771 * megabytes of memory are mapped so far.
772 * The PC-9800 places its MPC table at the
773 * very end of physical memory, so blindly
774 * reserving PAGE_SIZE from mpf->mpf_physptr
775 * would trip the BUG() in reserve_bootmem.
776 */
777 unsigned long size = PAGE_SIZE;
778 unsigned long end = max_low_pfn * PAGE_SIZE;
779 if (mpf->mpf_physptr + size > end)
780 size = end - mpf->mpf_physptr;
781 reserve_bootmem(mpf->mpf_physptr, size);
782 }
783
784 mpf_found = mpf;
785 return 1;
786 }
787 bp += 4;
788 length -= 16;
789 }
790 return 0;
791}
792
793void __init find_smp_config (void)
794{
795 unsigned int address;
796
797 /*
798 * FIXME: Linux assumes you have 640K of base ram..
799 * this continues the error...
800 *
801 * 1) Scan the bottom 1K for a signature
802 * 2) Scan the top 1K of base RAM
803 * 3) Scan the 64K of bios
804 */
805 if (smp_scan_config(0x0,0x400) ||
806 smp_scan_config(639*0x400,0x400) ||
807 smp_scan_config(0xF0000,0x10000))
808 return;
809 /*
810 * If it is an SMP machine we should know now, unless the
811 * configuration is in an EISA/MCA bus machine with an
812 * extended bios data area.
813 *
814 * there is a real-mode segmented pointer pointing to the
815 * 4K EBDA area at 0x40E, calculate and scan it here.
816 *
817 * NOTE! There are Linux loaders that will corrupt the EBDA
818 * area, and as such this kind of SMP config may be less
819 * trustworthy, simply because the SMP table may have been
820 * stomped on during early boot. These loaders are buggy and
821 * should be fixed.
822 *
823 * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
824 */
825
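	/*
	 * get_bios_ebda() is expected to return the physical EBDA base
	 * derived from the real-mode segment word at 0x40E (shifted left
	 * by four), or 0 if no EBDA is present.
	 */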
826 address = get_bios_ebda();
827 if (address)
828 smp_scan_config(address, 0x400);
829}
830
831/* --------------------------------------------------------------------------
832 ACPI-based MP Configuration
833 -------------------------------------------------------------------------- */
834
835#ifdef CONFIG_ACPI
836
837void __init mp_register_lapic_address (
838 u64 address)
839{
840 mp_lapic_addr = (unsigned long) address;
841
842 set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
843
844 if (boot_cpu_physical_apicid == -1U)
845 boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
846
847 Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
848}
849
850
851void __devinit mp_register_lapic (
852 u8 id,
853 u8 enabled)
854{
855 struct mpc_config_processor processor;
856 int boot_cpu = 0;
857
858 if (MAX_APICS - id <= 0) {
859 printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
860 id, MAX_APICS);
861 return;
862 }
863
864 if (id == boot_cpu_physical_apicid)
865 boot_cpu = 1;
866
867 processor.mpc_type = MP_PROCESSOR;
868 processor.mpc_apicid = id;
869 processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR));
870 processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
871 processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
872 processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
873 (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
874 processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
875 processor.mpc_reserved[0] = 0;
876 processor.mpc_reserved[1] = 0;
877
878 MP_processor_info(&processor);
879}
880
881#ifdef CONFIG_X86_IO_APIC
882
883#define MP_ISA_BUS 0
884#define MP_MAX_IOAPIC_PIN 127
885
886static struct mp_ioapic_routing {
887 int apic_id;
888 int gsi_base;
889 int gsi_end;
890 u32 pin_programmed[4];
891} mp_ioapic_routing[MAX_IO_APICS];
892
893
894static int mp_find_ioapic (
895 int gsi)
896{
897 int i = 0;
898
899 /* Find the IOAPIC that manages this GSI. */
900 for (i = 0; i < nr_ioapics; i++) {
901 if ((gsi >= mp_ioapic_routing[i].gsi_base)
902 && (gsi <= mp_ioapic_routing[i].gsi_end))
903 return i;
904 }
905
906 printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
907
908 return -1;
909}
910
911
912void __init mp_register_ioapic (
913 u8 id,
914 u32 address,
915 u32 gsi_base)
916{
917 int idx = 0;
918 int tmpid;
919
920 if (nr_ioapics >= MAX_IO_APICS) {
921 printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
922 "(found %d)\n", MAX_IO_APICS, nr_ioapics);
923 panic("Recompile kernel with bigger MAX_IO_APICS!\n");
924 }
925 if (!address) {
926 printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
927 " found in MADT table, skipping!\n");
928 return;
929 }
930
931 idx = nr_ioapics++;
932
933 mp_ioapics[idx].mpc_type = MP_IOAPIC;
934 mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
935 mp_ioapics[idx].mpc_apicaddr = address;
936
937 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
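	/*
	 * Pre-family-15 Intel parts have 4-bit I/O APIC IDs that share the
	 * physical APIC ID space with the CPUs, so ask for a free unique
	 * ID; if none is available (-1), skip this I/O APIC rather than
	 * registering it with a clashing ID.
	 */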
938 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 < 15))
939 tmpid = io_apic_get_unique_id(idx, id);
940 else
941 tmpid = id;
942 if (tmpid == -1) {
943 nr_ioapics--;
944 return;
945 }
946 mp_ioapics[idx].mpc_apicid = tmpid;
947 mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
948
949 /*
950 * Build basic GSI lookup table to facilitate gsi->io_apic lookups
951 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
952 */
953 mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
954 mp_ioapic_routing[idx].gsi_base = gsi_base;
955 mp_ioapic_routing[idx].gsi_end = gsi_base +
956 io_apic_get_redir_entries(idx);
957
958 printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, "
959 "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
960 mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
961 mp_ioapic_routing[idx].gsi_base,
962 mp_ioapic_routing[idx].gsi_end);
963
964 return;
965}
966
967
968void __init mp_override_legacy_irq (
969 u8 bus_irq,
970 u8 polarity,
971 u8 trigger,
972 u32 gsi)
973{
974 struct mpc_config_intsrc intsrc;
975 int ioapic = -1;
976 int pin = -1;
977
978 /*
979 * Convert 'gsi' to 'ioapic.pin'.
980 */
981 ioapic = mp_find_ioapic(gsi);
982 if (ioapic < 0)
983 return;
984 pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
985
986 /*
987 * TBD: This check is for faulty timer entries, where the override
988 * erroneously sets the trigger to level, resulting in a HUGE
989 * increase of timer interrupts!
990 */
991 if ((bus_irq == 0) && (trigger == 3))
992 trigger = 1;
993
994 intsrc.mpc_type = MP_INTSRC;
995 intsrc.mpc_irqtype = mp_INT;
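	/* MPS packs the polarity into bits 0-1 of the irq flag and the
	   trigger mode into bits 2-3. */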
996 intsrc.mpc_irqflag = (trigger << 2) | polarity;
997 intsrc.mpc_srcbus = MP_ISA_BUS;
998 intsrc.mpc_srcbusirq = bus_irq; /* IRQ */
999 intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */
1000 intsrc.mpc_dstirq = pin; /* INTIN# */
1001
1002 Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
1003 intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
1004 (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
1005 intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
1006
1007 mp_irqs[mp_irq_entries] = intsrc;
1008 if (++mp_irq_entries == MAX_IRQ_SOURCES)
1009 panic("Max # of irq sources exceeded!\n");
1010
1011 return;
1012}
1013
1014int es7000_plat;
1015
1016void __init mp_config_acpi_legacy_irqs (void)
1017{
1018 struct mpc_config_intsrc intsrc;
1019 int i = 0;
1020 int ioapic = -1;
1021
1022 /*
1023 * Fabricate the legacy ISA bus (bus #31).
1024 */
1025 mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
1026 Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);
1027
1028 /*
1029 * Older generations of ES7000 have no legacy identity mappings
1030 */
1031 if (es7000_plat == 1)
1032 return;
1033
1034 /*
1035 * Locate the IOAPIC that manages the ISA IRQs (0-15).
1036 */
1037 ioapic = mp_find_ioapic(0);
1038 if (ioapic < 0)
1039 return;
1040
1041 intsrc.mpc_type = MP_INTSRC;
1042 intsrc.mpc_irqflag = 0; /* Conforming */
1043 intsrc.mpc_srcbus = MP_ISA_BUS;
1044 intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
1045
1046 /*
1047 * Use the default configuration for the IRQs 0-15, unless
1048 * overridden by (MADT) interrupt source override entries.
1049 */
1050 for (i = 0; i < 16; i++) {
1051 int idx;
1052
1053 for (idx = 0; idx < mp_irq_entries; idx++) {
1054 struct mpc_config_intsrc *irq = mp_irqs + idx;
1055
1056 /* Do we already have a mapping for this ISA IRQ? */
1057 if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
1058 break;
1059
1060 /* Do we already have a mapping for this IOAPIC pin */
1061 if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
1062 (irq->mpc_dstirq == i))
1063 break;
1064 }
1065
1066 if (idx != mp_irq_entries) {
1067 printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
1068 continue; /* IRQ already used */
1069 }
1070
1071 intsrc.mpc_irqtype = mp_INT;
1072 intsrc.mpc_srcbusirq = i; /* Identity mapped */
1073 intsrc.mpc_dstirq = i;
1074
1075 Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
1076 "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
1077 (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
1078 intsrc.mpc_srcbusirq, intsrc.mpc_dstapic,
1079 intsrc.mpc_dstirq);
1080
1081 mp_irqs[mp_irq_entries] = intsrc;
1082 if (++mp_irq_entries == MAX_IRQ_SOURCES)
1083 panic("Max # of irq sources exceeded!\n");
1084 }
1085}
1086
1087#define MAX_GSI_NUM 4096
1088
1089int mp_register_gsi (u32 gsi, int triggering, int polarity)
1090{
1091 int ioapic = -1;
1092 int ioapic_pin = 0;
1093 int idx, bit = 0;
1094 static int pci_irq = 16;
1095 /*
1096 * Mapping between Global System Interrupts, which
1097 * represent all possible interrupts, and IRQs
1098 * assigned to actual devices.
1099 */
1100 static int gsi_to_irq[MAX_GSI_NUM];
1101
1102 /* Don't set up the ACPI SCI because it's already set up */
1103 if (acpi_fadt.sci_int == gsi)
1104 return gsi;
1105
1106 ioapic = mp_find_ioapic(gsi);
1107 if (ioapic < 0) {
1108 printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
1109 return gsi;
1110 }
1111
1112 ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
1113
1114 if (ioapic_renumber_irq)
1115 gsi = ioapic_renumber_irq(ioapic, gsi);
1116
1117 /*
1118 * Avoid pin reprogramming. PRTs typically include entries
1119 * with redundant pin->gsi mappings (but unique PCI devices);
1120 * we only program the IOAPIC on the first.
1121 */
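	/* pin_programmed[] is a bitmap, one 32-bit word per 32 pins: idx
	   selects the word, bit the pin within it (4 words cover pins
	   0-127, i.e. MP_MAX_IOAPIC_PIN). */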
1122 bit = ioapic_pin % 32;
1123 idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
1124 if (idx > 3) {
1125 printk(KERN_ERR "Invalid reference to IOAPIC pin "
1126 "%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
1127 ioapic_pin);
1128 return gsi;
1129 }
1130 if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
1131 Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
1132 mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
1133 return gsi_to_irq[gsi];
1134 }
1135
1136 mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
1137
1138 if (triggering == ACPI_LEVEL_SENSITIVE) {
1139 /*
1140 * For PCI devices assign IRQs in order, avoiding gaps
1141 * due to unused I/O APIC pins.
1142 */
1143 int irq = gsi;
1144 if (gsi < MAX_GSI_NUM) {
1145 if (gsi > 15)
1146 gsi = pci_irq++;
1147 /*
1148 * Don't assign IRQ used by ACPI SCI
1149 */
1150 if (gsi == acpi_fadt.sci_int)
1151 gsi = pci_irq++;
1152 gsi_to_irq[irq] = gsi;
1153 } else {
1154 printk(KERN_ERR "GSI %u is too high\n", gsi);
1155 return gsi;
1156 }
1157 }
1158
1159 io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
1160 triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
1161 polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
1162 return gsi;
1163}
1164
1165#endif /* CONFIG_X86_IO_APIC */
1166#endif /* CONFIG_ACPI */