/*
 * acpi.c - Architecture-Specific Low-Level ACPI Support
 *
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999,2000 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 2000, 2002-2003 Hewlett-Packard Co.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2000 Intel Corp.
 * Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
 * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2001 Jenna Hall <jenna.s.hall@intel.com>
 * Copyright (C) 2001 Takayoshi Kochi <t-kochi@bq.jp.nec.com>
 * Copyright (C) 2002 Erich Focht <efocht@ess.nec.de>
 * Copyright (C) 2004 Ashok Raj <ashok.raj@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/irq.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/mmzone.h>
#include <linux/nodemask.h>
#include <asm/io.h>
#include <asm/iosapic.h>
#include <asm/machvec.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm/numa.h>
#include <asm/sal.h>
#include <asm/cyclone.h>

#define BAD_MADT_ENTRY(entry, end) (                                        \
        (!entry) || (unsigned long)entry + sizeof(*entry) > end ||         \
        ((acpi_table_entry_header *)entry)->length != sizeof(*entry))

#define PREFIX "ACPI: "

void (*pm_idle) (void);
EXPORT_SYMBOL(pm_idle);
void (*pm_power_off) (void);
EXPORT_SYMBOL(pm_power_off);

unsigned char acpi_kbd_controller_present = 1;
unsigned char acpi_legacy_devices;

static unsigned int __initdata acpi_madt_rev;

unsigned int acpi_cpei_override;
unsigned int acpi_cpei_phys_cpuid;

#define MAX_SAPICS 256
u16 ia64_acpiid_to_sapicid[MAX_SAPICS] =
        { [0 ... MAX_SAPICS - 1] = -1 };
EXPORT_SYMBOL(ia64_acpiid_to_sapicid);

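/*
 * Return the machine-vector name for this platform.  On a generic kernel
 * the choice is made at boot time from the XSDT OEM ID (falling back to
 * "dig"); otherwise it is fixed by the kernel configuration.
 */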
const char *
acpi_get_sysname (void)
{
#ifdef CONFIG_IA64_GENERIC
        unsigned long rsdp_phys;
        struct acpi20_table_rsdp *rsdp;
        struct acpi_table_xsdt *xsdt;
        struct acpi_table_header *hdr;

        rsdp_phys = acpi_find_rsdp();
        if (!rsdp_phys) {
                printk(KERN_ERR "ACPI 2.0 RSDP not found, default to \"dig\"\n");
                return "dig";
        }

        rsdp = (struct acpi20_table_rsdp *) __va(rsdp_phys);
        if (strncmp(rsdp->signature, RSDP_SIG, sizeof(RSDP_SIG) - 1)) {
                printk(KERN_ERR "ACPI 2.0 RSDP signature incorrect, default to \"dig\"\n");
                return "dig";
        }

        xsdt = (struct acpi_table_xsdt *) __va(rsdp->xsdt_address);
        hdr = &xsdt->header;
        if (strncmp(hdr->signature, XSDT_SIG, sizeof(XSDT_SIG) - 1)) {
                printk(KERN_ERR "ACPI 2.0 XSDT signature incorrect, default to \"dig\"\n");
                return "dig";
        }

        if (!strcmp(hdr->oem_id, "HP")) {
                return "hpzx1";
        }
        else if (!strcmp(hdr->oem_id, "SGI")) {
                return "sn2";
        }

        return "dig";
#else
# if defined (CONFIG_IA64_HP_SIM)
        return "hpsim";
# elif defined (CONFIG_IA64_HP_ZX1)
        return "hpzx1";
# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
        return "hpzx1_swiotlb";
# elif defined (CONFIG_IA64_SGI_SN2)
        return "sn2";
# elif defined (CONFIG_IA64_DIG)
        return "dig";
# else
#  error Unknown platform.  Fix acpi.c.
# endif
#endif
}

#ifdef CONFIG_ACPI_BOOT

#define ACPI_MAX_PLATFORM_INTERRUPTS	256

/* Array to record platform interrupt vectors for generic interrupt routing. */
int platform_intr_list[ACPI_MAX_PLATFORM_INTERRUPTS] = {
        [0 ... ACPI_MAX_PLATFORM_INTERRUPTS - 1] = -1
};

enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_IOSAPIC;

/*
 * Interrupt routing API for device drivers.  Provides interrupt vector for
 * a generic platform event.  Currently only CPEI is implemented.
 */
int
acpi_request_vector (u32 int_type)
{
        int vector = -1;

        if (int_type < ACPI_MAX_PLATFORM_INTERRUPTS) {
                /* corrected platform error interrupt */
                vector = platform_intr_list[int_type];
        } else
                printk(KERN_ERR "acpi_request_vector(): invalid interrupt type\n");
        return vector;
}

char *
__acpi_map_table (unsigned long phys_addr, unsigned long size)
{
        return __va(phys_addr);
}

/* --------------------------------------------------------------------------
                            Boot-time Table Parsing
   -------------------------------------------------------------------------- */

static int total_cpus __initdata;
static int available_cpus __initdata;
struct acpi_table_madt *acpi_madt __initdata;
static u8 has_8259;


static int __init
acpi_parse_lapic_addr_ovr (
        acpi_table_entry_header *header, const unsigned long end)
{
        struct acpi_table_lapic_addr_ovr *lapic;

        lapic = (struct acpi_table_lapic_addr_ovr *) header;

        if (BAD_MADT_ENTRY(lapic, end))
                return -EINVAL;

        if (lapic->address) {
                iounmap(ipi_base_addr);
                ipi_base_addr = ioremap(lapic->address, 0);
        }
        return 0;
}


static int __init
acpi_parse_lsapic (acpi_table_entry_header *header, const unsigned long end)
{
        struct acpi_table_lsapic *lsapic;

        lsapic = (struct acpi_table_lsapic *) header;

        if (BAD_MADT_ENTRY(lsapic, end))
                return -EINVAL;

        if (lsapic->flags.enabled) {
#ifdef CONFIG_SMP
                smp_boot_data.cpu_phys_id[available_cpus] = (lsapic->id << 8) | lsapic->eid;
#endif
                ia64_acpiid_to_sapicid[lsapic->acpi_id] = (lsapic->id << 8) | lsapic->eid;
                ++available_cpus;
        }

        total_cpus++;
        return 0;
}


static int __init
acpi_parse_lapic_nmi (acpi_table_entry_header *header, const unsigned long end)
{
        struct acpi_table_lapic_nmi *lacpi_nmi;

        lacpi_nmi = (struct acpi_table_lapic_nmi *) header;

        if (BAD_MADT_ENTRY(lacpi_nmi, end))
                return -EINVAL;

        /* TBD: Support lapic_nmi entries */
        return 0;
}


static int __init
acpi_parse_iosapic (acpi_table_entry_header *header, const unsigned long end)
{
        struct acpi_table_iosapic *iosapic;

        iosapic = (struct acpi_table_iosapic *) header;

        if (BAD_MADT_ENTRY(iosapic, end))
                return -EINVAL;

        return iosapic_init(iosapic->address, iosapic->global_irq_base);
}


static int __init
acpi_parse_plat_int_src (
        acpi_table_entry_header *header, const unsigned long end)
{
        struct acpi_table_plat_int_src *plintsrc;
        int vector;

        plintsrc = (struct acpi_table_plat_int_src *) header;

        if (BAD_MADT_ENTRY(plintsrc, end))
                return -EINVAL;

        /*
         * Get vector assignment for this interrupt, set attributes,
         * and program the IOSAPIC routing table.
         */
        vector = iosapic_register_platform_intr(plintsrc->type,
                        plintsrc->global_irq,
                        plintsrc->iosapic_vector,
                        plintsrc->eid,
                        plintsrc->id,
                        (plintsrc->flags.polarity == 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
                        (plintsrc->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);

        platform_intr_list[plintsrc->type] = vector;
        if (acpi_madt_rev > 1) {
                acpi_cpei_override = plintsrc->plint_flags.cpei_override_flag;
        }

        /*
         * Save the physical id, so we can check when it's being removed
         */
        acpi_cpei_phys_cpuid = ((plintsrc->id << 8) | (plintsrc->eid)) & 0xffff;

        return 0;
}


unsigned int can_cpei_retarget(void)
{
        extern int cpe_vector;

        /*
         * Only if CPEI is supported and the override flag
         * is present; otherwise report that it is re-targetable
         * if we are in polling mode.
         */
        if (cpe_vector > 0 && !acpi_cpei_override)
                return 0;
        else
                return 1;
}

unsigned int is_cpu_cpei_target(unsigned int cpu)
{
        unsigned int logical_id;

        logical_id = cpu_logical_id(acpi_cpei_phys_cpuid);

        if (logical_id == cpu)
                return 1;
        else
                return 0;
}

void set_cpei_target_cpu(unsigned int cpu)
{
        acpi_cpei_phys_cpuid = cpu_physical_id(cpu);
}

unsigned int get_cpei_target_cpu(void)
{
        return acpi_cpei_phys_cpuid;
}

static int __init
acpi_parse_int_src_ovr (
        acpi_table_entry_header *header, const unsigned long end)
{
        struct acpi_table_int_src_ovr *p;

        p = (struct acpi_table_int_src_ovr *) header;

        if (BAD_MADT_ENTRY(p, end))
                return -EINVAL;

        iosapic_override_isa_irq(p->bus_irq, p->global_irq,
                        (p->flags.polarity == 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
                        (p->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
        return 0;
}


static int __init
acpi_parse_nmi_src (acpi_table_entry_header *header, const unsigned long end)
{
        struct acpi_table_nmi_src *nmi_src;

        nmi_src = (struct acpi_table_nmi_src *) header;

        if (BAD_MADT_ENTRY(nmi_src, end))
                return -EINVAL;

        /* TBD: Support nmi_src entries */
        return 0;
}

static void __init
acpi_madt_oem_check (char *oem_id, char *oem_table_id)
{
        if (!strncmp(oem_id, "IBM", 3) &&
            (!strncmp(oem_table_id, "SERMOW", 6))) {

                /*
                 * Unfortunately ITC_DRIFT is not yet part of the
                 * official SAL spec, so the ITC_DRIFT bit is not
                 * set by the BIOS on this hardware.
                 */
                sal_platform_features |= IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT;

                cyclone_setup();
        }
}

static int __init
acpi_parse_madt (unsigned long phys_addr, unsigned long size)
{
        if (!phys_addr || !size)
                return -EINVAL;

        acpi_madt = (struct acpi_table_madt *) __va(phys_addr);

        acpi_madt_rev = acpi_madt->header.revision;

        /* remember the value for reference after free_initmem() */
#ifdef CONFIG_ITANIUM
        has_8259 = 1; /* Firmware on old Itanium systems is broken */
#else
        has_8259 = acpi_madt->flags.pcat_compat;
#endif
        iosapic_system_init(has_8259);

        /* Get base address of IPI Message Block */

        if (acpi_madt->lapic_address)
                ipi_base_addr = ioremap(acpi_madt->lapic_address, 0);

        printk(KERN_INFO PREFIX "Local APIC address %p\n", ipi_base_addr);

        acpi_madt_oem_check(acpi_madt->header.oem_id,
                            acpi_madt->header.oem_table_id);

        return 0;
}


#ifdef CONFIG_ACPI_NUMA

#undef SLIT_DEBUG

#define PXM_FLAG_LEN ((MAX_PXM_DOMAINS + 1)/32)

static int __initdata srat_num_cpus;	/* number of cpus */
static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
#define pxm_bit_set(bit)	(set_bit(bit,(void *)pxm_flag))
#define pxm_bit_test(bit)	(test_bit(bit,(void *)pxm_flag))
/* maps to convert between proximity domain and logical node ID */
int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS];
int __initdata nid_to_pxm_map[MAX_NUMNODES];
static struct acpi_table_slit __initdata *slit_table;

/*
 * ACPI 2.0 SLIT (System Locality Information Table)
 * http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf
 */
void __init
acpi_numa_slit_init (struct acpi_table_slit *slit)
{
        u32 len;

        len = sizeof(struct acpi_table_header) + 8
              + slit->localities * slit->localities;
        if (slit->header.length != len) {
                printk(KERN_ERR "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
                       len, slit->header.length);
                memset(numa_slit, 10, sizeof(numa_slit));
                return;
        }
        slit_table = slit;
}

void __init
acpi_numa_processor_affinity_init (struct acpi_table_processor_affinity *pa)
{
        /* record this node in proximity bitmap */
        pxm_bit_set(pa->proximity_domain);

        node_cpuid[srat_num_cpus].phys_id = (pa->apic_id << 8) | (pa->lsapic_eid);
        /* nid should be overridden as logical node id later */
        node_cpuid[srat_num_cpus].nid = pa->proximity_domain;
        srat_num_cpus++;
}

void __init
acpi_numa_memory_affinity_init (struct acpi_table_memory_affinity *ma)
{
        unsigned long paddr, size;
        u8 pxm;
        struct node_memblk_s *p, *q, *pend;

        pxm = ma->proximity_domain;

        /* fill node memory chunk structure */
        paddr = ma->base_addr_hi;
        paddr = (paddr << 32) | ma->base_addr_lo;
        size = ma->length_hi;
        size = (size << 32) | ma->length_lo;

        /* Ignore disabled entries */
        if (!ma->flags.enabled)
                return;

        /* record this node in proximity bitmap */
        pxm_bit_set(pxm);

        /* Insertion sort based on base address */
        pend = &node_memblk[num_node_memblks];
        for (p = &node_memblk[0]; p < pend; p++) {
                if (paddr < p->start_paddr)
                        break;
        }
        if (p < pend) {
                for (q = pend - 1; q >= p; q--)
                        *(q + 1) = *q;
        }
        p->start_paddr = paddr;
        p->size = size;
        p->nid = pxm;
        num_node_memblks++;
}

void __init
acpi_numa_arch_fixup (void)
{
        int i, j, node_from, node_to;

        /* If there's no SRAT, fix the phys_id and mark node 0 online */
        if (srat_num_cpus == 0) {
                node_set_online(0);
                node_cpuid[0].phys_id = hard_smp_processor_id();
                return;
        }

        /*
         * MCD - This can probably be dropped now.  No need for pxm ID to node ID
         * mapping with sparse node numbering iff MAX_PXM_DOMAINS <= MAX_NUMNODES.
         */
        /* calculate total number of nodes in system from PXM bitmap */
        memset(pxm_to_nid_map, -1, sizeof(pxm_to_nid_map));
        memset(nid_to_pxm_map, -1, sizeof(nid_to_pxm_map));
        nodes_clear(node_online_map);
        for (i = 0; i < MAX_PXM_DOMAINS; i++) {
                if (pxm_bit_test(i)) {
                        int nid = num_online_nodes();
                        pxm_to_nid_map[i] = nid;
                        nid_to_pxm_map[nid] = i;
                        node_set_online(nid);
                }
        }

        /* set logical node id in memory chunk structure */
        for (i = 0; i < num_node_memblks; i++)
                node_memblk[i].nid = pxm_to_nid_map[node_memblk[i].nid];

        /* assign memory bank numbers for each chunk on each node */
        for_each_online_node(i) {
                int bank;

                bank = 0;
                for (j = 0; j < num_node_memblks; j++)
                        if (node_memblk[j].nid == i)
                                node_memblk[j].bank = bank++;
        }

        /* set logical node id in cpu structure */
        for (i = 0; i < srat_num_cpus; i++)
                node_cpuid[i].nid = pxm_to_nid_map[node_cpuid[i].nid];

        printk(KERN_INFO "Number of logical nodes in system = %d\n", num_online_nodes());
        printk(KERN_INFO "Number of memory chunks in system = %d\n", num_node_memblks);

        if (!slit_table) return;
        memset(numa_slit, -1, sizeof(numa_slit));
        for (i = 0; i < slit_table->localities; i++) {
                if (!pxm_bit_test(i))
                        continue;
                node_from = pxm_to_nid_map[i];
                for (j = 0; j < slit_table->localities; j++) {
                        if (!pxm_bit_test(j))
                                continue;
                        node_to = pxm_to_nid_map[j];
                        node_distance(node_from, node_to) =
                                slit_table->entry[i * slit_table->localities + j];
                }
        }

#ifdef SLIT_DEBUG
        printk("ACPI 2.0 SLIT locality table:\n");
        for_each_online_node(i) {
                for_each_online_node(j)
                        printk("%03d ", node_distance(i, j));
                printk("\n");
        }
#endif
}
#endif /* CONFIG_ACPI_NUMA */

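/*
 * Map a GSI to an interrupt vector: legacy ISA IRQs (GSI < 16) use the
 * 8259 mapping when a PC-AT compatible PIC is present; everything else is
 * registered with the IOSAPIC using the requested trigger and polarity.
 */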
unsigned int
acpi_register_gsi (u32 gsi, int edge_level, int active_high_low)
{
        if (has_8259 && gsi < 16)
                return isa_irq_to_vector(gsi);

        return iosapic_register_intr(gsi,
                        (active_high_low == ACPI_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
                        (edge_level == ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
}
EXPORT_SYMBOL(acpi_register_gsi);

#ifdef CONFIG_ACPI_DEALLOCATE_IRQ
void
acpi_unregister_gsi (u32 gsi)
{
        iosapic_unregister_intr(gsi);
}
EXPORT_SYMBOL(acpi_unregister_gsi);
#endif /* CONFIG_ACPI_DEALLOCATE_IRQ */

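/*
 * Handler for the ACPI 2.0 FADT: note whether an 8042 keyboard controller
 * and other legacy devices are present (IA-PC boot-architecture flags) and
 * register the SCI interrupt.
 */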
static int __init
acpi_parse_fadt (unsigned long phys_addr, unsigned long size)
{
        struct acpi_table_header *fadt_header;
        struct fadt_descriptor_rev2 *fadt;

        if (!phys_addr || !size)
                return -EINVAL;

        fadt_header = (struct acpi_table_header *) __va(phys_addr);
        if (fadt_header->revision != 3)
                return -ENODEV;	/* Only deal with ACPI 2.0 FADT */

        fadt = (struct fadt_descriptor_rev2 *) fadt_header;

        if (!(fadt->iapc_boot_arch & BAF_8042_KEYBOARD_CONTROLLER))
                acpi_kbd_controller_present = 0;

        if (fadt->iapc_boot_arch & BAF_LEGACY_DEVICES)
                acpi_legacy_devices = 1;

        acpi_register_gsi(fadt->sci_int, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
        return 0;
}

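/*
 * Return the physical address of the ACPI 2.0 RSDP published by EFI,
 * or 0 if only pre-2.0 tables are available (no longer supported).
 */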
unsigned long __init
acpi_find_rsdp (void)
{
        unsigned long rsdp_phys = 0;

        if (efi.acpi20)
                rsdp_phys = __pa(efi.acpi20);
        else if (efi.acpi)
                printk(KERN_WARNING PREFIX "v1.0/r0.71 tables no longer supported\n");
        return rsdp_phys;
}


int __init
acpi_boot_init (void)
{

        /*
         * MADT
         * ----
         * Parse the Multiple APIC Description Table (MADT), if it exists.
         * Note that this table provides platform SMP configuration
         * information -- the successor to MPS tables.
         */

        if (acpi_table_parse(ACPI_APIC, acpi_parse_madt) < 1) {
                printk(KERN_ERR PREFIX "Can't find MADT\n");
                goto skip_madt;
        }

        /* Local APIC */

        if (acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr, 0) < 0)
                printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n");

        if (acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_parse_lsapic, NR_CPUS) < 1)
                printk(KERN_ERR PREFIX "Error parsing MADT - no LAPIC entries\n");

        if (acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0) < 0)
                printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");

        /* I/O APIC */

        if (acpi_table_parse_madt(ACPI_MADT_IOSAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1)
                printk(KERN_ERR PREFIX "Error parsing MADT - no IOSAPIC entries\n");

        /* System-Level Interrupt Routing */

        if (acpi_table_parse_madt(ACPI_MADT_PLAT_INT_SRC, acpi_parse_plat_int_src, ACPI_MAX_PLATFORM_INTERRUPTS) < 0)
                printk(KERN_ERR PREFIX "Error parsing platform interrupt source entry\n");

        if (acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr, 0) < 0)
                printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n");

        if (acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src, 0) < 0)
                printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
  skip_madt:

        /*
         * FADT says whether a legacy keyboard controller is present.
         * The FADT also contains an SCI_INT line, by which the system
         * gets interrupts such as power and sleep buttons.  If it's not
         * on a legacy interrupt, it needs to be set up.
         */
        if (acpi_table_parse(ACPI_FADT, acpi_parse_fadt) < 1)
                printk(KERN_ERR PREFIX "Can't find FADT\n");

#ifdef CONFIG_SMP
        if (available_cpus == 0) {
                printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
                printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
                smp_boot_data.cpu_phys_id[available_cpus] = hard_smp_processor_id();
                available_cpus = 1;	/* We've got at least one of these, no? */
        }
        smp_boot_data.cpu_count = available_cpus;

        smp_build_cpu_map();
# ifdef CONFIG_ACPI_NUMA
        if (srat_num_cpus == 0) {
                int cpu, i = 1;
                for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)
                        if (smp_boot_data.cpu_phys_id[cpu] != hard_smp_processor_id())
                                node_cpuid[i++].phys_id = smp_boot_data.cpu_phys_id[cpu];
        }
# endif
#endif
#ifdef CONFIG_ACPI_NUMA
        build_cpu_to_node_map();
#endif
        /* Make boot-up look pretty */
        printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus, total_cpus);
        return 0;
}

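/*
 * Translate a GSI into the interrupt number used by the kernel (the
 * vector), honouring the legacy ISA mapping for GSI < 16 when an 8259
 * is present.
 */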
int
acpi_gsi_to_irq (u32 gsi, unsigned int *irq)
{
        int vector;

        if (has_8259 && gsi < 16)
                *irq = isa_irq_to_vector(gsi);
        else {
                vector = gsi_to_vector(gsi);
                if (vector == -1)
                        return -1;

                *irq = vector;
        }
        return 0;
}

/*
 * ACPI based hotplug CPU support
 */
#ifdef CONFIG_ACPI_HOTPLUG_CPU
static
int
acpi_map_cpu2node(acpi_handle handle, int cpu, long physid)
{
#ifdef CONFIG_ACPI_NUMA
        int pxm_id;

        pxm_id = acpi_get_pxm(handle);

        /*
         * Assuming that the container driver would have set the proximity
         * domain and would have initialized pxm_to_nid_map[pxm_id] && pxm_flag
         */
        node_cpuid[cpu].nid = (pxm_id < 0) ? 0 :
                pxm_to_nid_map[pxm_id];

        node_cpuid[cpu].phys_id = physid;
#endif
        return(0);
}

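/*
 * Hot-plug path: evaluate the processor object's _MAT method to obtain its
 * LSAPIC MADT entry, pick a free logical CPU number, and record the
 * sapicid and node mappings for the new CPU.
 */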
int
acpi_map_lsapic(acpi_handle handle, int *pcpu)
{
        struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
        union acpi_object *obj;
        struct acpi_table_lsapic *lsapic;
        cpumask_t tmp_map;
        long physid;
        int cpu;

        if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
                return -EINVAL;

        if (!buffer.length || !buffer.pointer)
                return -EINVAL;

        obj = buffer.pointer;
        if (obj->type != ACPI_TYPE_BUFFER ||
            obj->buffer.length < sizeof(*lsapic)) {
                acpi_os_free(buffer.pointer);
                return -EINVAL;
        }

        lsapic = (struct acpi_table_lsapic *)obj->buffer.pointer;

        if ((lsapic->header.type != ACPI_MADT_LSAPIC) ||
            (!lsapic->flags.enabled)) {
                acpi_os_free(buffer.pointer);
                return -EINVAL;
        }

        physid = ((lsapic->id << 8) | (lsapic->eid));

        acpi_os_free(buffer.pointer);
        buffer.length = ACPI_ALLOCATE_BUFFER;
        buffer.pointer = NULL;

        cpus_complement(tmp_map, cpu_present_map);
        cpu = first_cpu(tmp_map);
        if (cpu >= NR_CPUS)
                return -EINVAL;

        acpi_map_cpu2node(handle, cpu, physid);

        cpu_set(cpu, cpu_present_map);
        ia64_cpu_to_sapicid[cpu] = physid;
        ia64_acpiid_to_sapicid[lsapic->acpi_id] = ia64_cpu_to_sapicid[cpu];

        *pcpu = cpu;
        return(0);
}
EXPORT_SYMBOL(acpi_map_lsapic);


int
acpi_unmap_lsapic(int cpu)
{
        int i;

        for (i = 0; i < MAX_SAPICS; i++) {
                if (ia64_acpiid_to_sapicid[i] == ia64_cpu_to_sapicid[cpu]) {
                        ia64_acpiid_to_sapicid[i] = -1;
                        break;
                }
        }
        ia64_cpu_to_sapicid[cpu] = -1;
        cpu_clear(cpu, cpu_present_map);

#ifdef CONFIG_ACPI_NUMA
        /* NUMA specific cleanups */
#endif

        return(0);
}
EXPORT_SYMBOL(acpi_unmap_lsapic);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */


#ifdef CONFIG_ACPI_NUMA
acpi_status __devinit
acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret)
{
        struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
        union acpi_object *obj;
        struct acpi_table_iosapic *iosapic;
        unsigned int gsi_base;
        int pxm, node;

        /* Only care about objects w/ a method that returns the MADT */
        if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
                return AE_OK;

        if (!buffer.length || !buffer.pointer)
                return AE_OK;

        obj = buffer.pointer;
        if (obj->type != ACPI_TYPE_BUFFER ||
            obj->buffer.length < sizeof(*iosapic)) {
                acpi_os_free(buffer.pointer);
                return AE_OK;
        }

        iosapic = (struct acpi_table_iosapic *)obj->buffer.pointer;

        if (iosapic->header.type != ACPI_MADT_IOSAPIC) {
                acpi_os_free(buffer.pointer);
                return AE_OK;
        }

        gsi_base = iosapic->global_irq_base;

        acpi_os_free(buffer.pointer);

        /*
         * OK, it's an IOSAPIC MADT entry, look for a _PXM value to tell
         * us which node to associate this with.
         */
        pxm = acpi_get_pxm(handle);
        if (pxm < 0)
                return AE_OK;

        node = pxm_to_nid_map[pxm];

        if (node >= MAX_NUMNODES || !node_online(node) ||
            cpus_empty(node_to_cpumask(node)))
                return AE_OK;

        /* We know a gsi to node mapping! */
        map_iosapic_to_node(gsi_base, node);
        return AE_OK;
}
#endif /* CONFIG_ACPI_NUMA */

int
acpi_register_ioapic (acpi_handle handle, u64 phys_addr, u32 gsi_base)
{
        int err;

        if ((err = iosapic_init(phys_addr, gsi_base)))
                return err;

#ifdef CONFIG_ACPI_NUMA
        acpi_map_iosapic(handle, 0, NULL, NULL);
#endif /* CONFIG_ACPI_NUMA */

        return 0;
}
EXPORT_SYMBOL(acpi_register_ioapic);

int
acpi_unregister_ioapic (acpi_handle handle, u32 gsi_base)
{
        return iosapic_remove(gsi_base);
}
EXPORT_SYMBOL(acpi_unregister_ioapic);

#endif /* CONFIG_ACPI_BOOT */