ACPI: Disable MWAIT via DMI on broken Compal board
[deliverable/linux.git] drivers/acpi/processor_core.c
1 /*
2 * acpi_processor.c - ACPI Processor Driver ($Revision: 71 $)
3 *
4 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
5 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
6 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
7 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
8 * - Added processor hotplug support
9 *
10 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or (at
15 * your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this program; if not, write to the Free Software Foundation, Inc.,
24 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
25 *
26 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
27 * TBD:
28 * 1. Make # power states dynamic.
29 * 2. Support duty_cycle values that span bit 4.
30 * 3. Optimize by having the scheduler determine busyness instead of
31 * having us try to calculate it here.
32 * 4. Need C1 timing -- must modify kernel (IRQ handler) to get this.
33 */
34
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/init.h>
38 #include <linux/types.h>
39 #include <linux/pci.h>
40 #include <linux/pm.h>
41 #include <linux/cpufreq.h>
42 #include <linux/cpu.h>
43 #include <linux/proc_fs.h>
44 #include <linux/seq_file.h>
45 #include <linux/dmi.h>
46 #include <linux/moduleparam.h>
47 #include <linux/cpuidle.h>
48
49 #include <asm/io.h>
50 #include <asm/system.h>
51 #include <asm/cpu.h>
52 #include <asm/delay.h>
53 #include <asm/uaccess.h>
54 #include <asm/processor.h>
55 #include <asm/smp.h>
56 #include <asm/acpi.h>
57
58 #include <acpi/acpi_bus.h>
59 #include <acpi/acpi_drivers.h>
60 #include <acpi/processor.h>
61
62 #define ACPI_PROCESSOR_COMPONENT 0x01000000
63 #define ACPI_PROCESSOR_CLASS "processor"
64 #define ACPI_PROCESSOR_DEVICE_NAME "Processor"
65 #define ACPI_PROCESSOR_FILE_INFO "info"
66 #define ACPI_PROCESSOR_FILE_THROTTLING "throttling"
67 #define ACPI_PROCESSOR_FILE_LIMIT "limit"
68 #define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
69 #define ACPI_PROCESSOR_NOTIFY_POWER 0x81
70 #define ACPI_PROCESSOR_NOTIFY_THROTTLING 0x82
71
72 #define ACPI_PROCESSOR_LIMIT_USER 0
73 #define ACPI_PROCESSOR_LIMIT_THERMAL 1
74
75 #define _COMPONENT ACPI_PROCESSOR_COMPONENT
76 ACPI_MODULE_NAME("processor_core");
77
78 MODULE_AUTHOR("Paul Diefenbaugh");
79 MODULE_DESCRIPTION("ACPI Processor Driver");
80 MODULE_LICENSE("GPL");
81
82 static int acpi_processor_add(struct acpi_device *device);
83 static int acpi_processor_start(struct acpi_device *device);
84 static int acpi_processor_remove(struct acpi_device *device, int type);
85 static int acpi_processor_info_open_fs(struct inode *inode, struct file *file);
86 static void acpi_processor_notify(acpi_handle handle, u32 event, void *data);
87 static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu);
88 static int acpi_processor_handle_eject(struct acpi_processor *pr);
89
90
91 static const struct acpi_device_id processor_device_ids[] = {
92 {ACPI_PROCESSOR_HID, 0},
93 {"", 0},
94 };
95 MODULE_DEVICE_TABLE(acpi, processor_device_ids);
96
97 static struct acpi_driver acpi_processor_driver = {
98 .name = "processor",
99 .class = ACPI_PROCESSOR_CLASS,
100 .ids = processor_device_ids,
101 .ops = {
102 .add = acpi_processor_add,
103 .remove = acpi_processor_remove,
104 .start = acpi_processor_start,
105 .suspend = acpi_processor_suspend,
106 .resume = acpi_processor_resume,
107 },
108 };
109
110 #define INSTALL_NOTIFY_HANDLER 1
111 #define UNINSTALL_NOTIFY_HANDLER 2
112
113 static const struct file_operations acpi_processor_info_fops = {
114 .owner = THIS_MODULE,
115 .open = acpi_processor_info_open_fs,
116 .read = seq_read,
117 .llseek = seq_lseek,
118 .release = single_release,
119 };
120
121 DEFINE_PER_CPU(struct acpi_processor *, processors);
122 struct acpi_processor_errata errata __read_mostly;
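
/*
 * DMI callback: boards listed in processor_idle_dmi_table below are known
 * to misbehave when MWAIT is used to enter C-states, so mark MWAIT as
 * unusable for idle on them.
 */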
123 static int set_no_mwait(const struct dmi_system_id *id)
124 {
125 printk(KERN_NOTICE PREFIX "%s detected - "
126 "disable mwait for CPU C-stetes\n", id->ident);
127 idle_nomwait = 1;
128 return 0;
129 }
130
131 static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = {
132 {
133 set_no_mwait, "IFL91 board", {
134 DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
135 DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"),
136 DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"),
137 DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL},
138 {
139 set_no_mwait, "Extensa 5220", {
140 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
141 DMI_MATCH(DMI_SYS_VENDOR, "ACER"),
142 DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
143 DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
144 {},
145 };
146
147 /* --------------------------------------------------------------------------
148 Errata Handling
149 -------------------------------------------------------------------------- */
150
151 static int acpi_processor_errata_piix4(struct pci_dev *dev)
152 {
153 u8 value1 = 0;
154 u8 value2 = 0;
155
156
157 if (!dev)
158 return -EINVAL;
159
160 /*
161 * Note that 'dev' references the PIIX4 ACPI Controller.
162 */
163
164 switch (dev->revision) {
165 case 0:
166 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 A-step\n"));
167 break;
168 case 1:
169 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 B-step\n"));
170 break;
171 case 2:
172 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4E\n"));
173 break;
174 case 3:
175 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4M\n"));
176 break;
177 default:
178 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unknown PIIX4\n"));
179 break;
180 }
181
182 switch (dev->revision) {
183
184 case 0: /* PIIX4 A-step */
185 case 1: /* PIIX4 B-step */
186 /*
187 * See specification changes #13 ("Manual Throttle Duty Cycle")
188 * and #14 ("Enabling and Disabling Manual Throttle"), plus
189 * erratum #5 ("STPCLK# Deassertion Time") from the January
190 * 2002 PIIX4 specification update. Applies to only older
191 * 2002 PIIX4 specification update. Applies only to older
192 * PIIX4 models.
193 errata.piix4.throttle = 1;
194
195 case 2: /* PIIX4E */
196 case 3: /* PIIX4M */
197 /*
198 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
199 * Livelock") from the January 2002 PIIX4 specification update.
200 * Applies to all PIIX4 models.
201 */
202
203 /*
204 * BM-IDE
205 * ------
206 * Find the PIIX4 IDE Controller and get the Bus Master IDE
207 * Status register address. We'll use this later to read
208 * each IDE controller's DMA status to make sure we catch all
209 * DMA activity.
210 */
211 dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
212 PCI_DEVICE_ID_INTEL_82371AB,
213 PCI_ANY_ID, PCI_ANY_ID, NULL);
214 if (dev) {
215 errata.piix4.bmisx = pci_resource_start(dev, 4);
216 pci_dev_put(dev);
217 }
218
219 /*
220 * Type-F DMA
221 * ----------
222 * Find the PIIX4 ISA Controller and read the Motherboard
223 * DMA controller's status to see if Type-F (Fast) DMA mode
224 * is enabled (bit 7) on either channel. Note that we'll
225 * disable C3 support if this is enabled, as some legacy
226 * devices won't operate well if fast DMA is disabled.
227 */
228 dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
229 PCI_DEVICE_ID_INTEL_82371AB_0,
230 PCI_ANY_ID, PCI_ANY_ID, NULL);
231 if (dev) {
232 pci_read_config_byte(dev, 0x76, &value1);
233 pci_read_config_byte(dev, 0x77, &value2);
234 if ((value1 & 0x80) || (value2 & 0x80))
235 errata.piix4.fdma = 1;
236 pci_dev_put(dev);
237 }
238
239 break;
240 }
241
242 if (errata.piix4.bmisx)
243 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
244 "Bus master activity detection (BM-IDE) erratum enabled\n"));
245 if (errata.piix4.fdma)
246 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
247 "Type-F DMA livelock erratum (C3 disabled)\n"));
248
249 return 0;
250 }
251
252 static int acpi_processor_errata(struct acpi_processor *pr)
253 {
254 int result = 0;
255 struct pci_dev *dev = NULL;
256
257
258 if (!pr)
259 return -EINVAL;
260
261 /*
262 * PIIX4
263 */
264 dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
265 PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID,
266 PCI_ANY_ID, NULL);
267 if (dev) {
268 result = acpi_processor_errata_piix4(dev);
269 pci_dev_put(dev);
270 }
271
272 return result;
273 }
274
275 /* --------------------------------------------------------------------------
276 Common ACPI processor functions
277 -------------------------------------------------------------------------- */
278
279 /*
280 * _PDC is required for a BIOS-OS handshake for most of the newer
281 * ACPI processor features.
282 */
283 static int acpi_processor_set_pdc(struct acpi_processor *pr)
284 {
285 struct acpi_object_list *pdc_in = pr->pdc;
286 acpi_status status = AE_OK;
287
288
289 if (!pdc_in)
290 return status;
291 if (idle_nomwait) {
292 /*
293 * If mwait is disabled for CPU C-states, clear the C2C3_FFH
294 * access mode bit in the _PDC parameter buffer; the C1_FFH
295 * access mode bit is cleared as well.
296 */
297 union acpi_object *obj;
298 u32 *buffer = NULL;
299
300 obj = pdc_in->pointer;
301 buffer = (u32 *)(obj->buffer.pointer);
302 buffer[2] &= ~(ACPI_PDC_C_C2C3_FFH | ACPI_PDC_C_C1_FFH);
303
304 }
305 status = acpi_evaluate_object(pr->handle, "_PDC", pdc_in, NULL);
306
307 if (ACPI_FAILURE(status))
308 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
309 "Could not evaluate _PDC, using legacy perf. control...\n"));
310
311 return status;
312 }
313
314 /* --------------------------------------------------------------------------
315 FS Interface (/proc)
316 -------------------------------------------------------------------------- */
317
318 static struct proc_dir_entry *acpi_processor_dir = NULL;
319
320 static int acpi_processor_info_seq_show(struct seq_file *seq, void *offset)
321 {
322 struct acpi_processor *pr = seq->private;
323
324
325 if (!pr)
326 goto end;
327
328 seq_printf(seq, "processor id: %d\n"
329 "acpi id: %d\n"
330 "bus mastering control: %s\n"
331 "power management: %s\n"
332 "throttling control: %s\n"
333 "limit interface: %s\n",
334 pr->id,
335 pr->acpi_id,
336 pr->flags.bm_control ? "yes" : "no",
337 pr->flags.power ? "yes" : "no",
338 pr->flags.throttling ? "yes" : "no",
339 pr->flags.limit ? "yes" : "no");
340
341 end:
342 return 0;
343 }
344
345 static int acpi_processor_info_open_fs(struct inode *inode, struct file *file)
346 {
347 return single_open(file, acpi_processor_info_seq_show,
348 PDE(inode)->data);
349 }
350
351 static int acpi_processor_add_fs(struct acpi_device *device)
352 {
353 struct proc_dir_entry *entry = NULL;
354
355
356 if (!acpi_device_dir(device)) {
357 acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
358 acpi_processor_dir);
359 if (!acpi_device_dir(device))
360 return -ENODEV;
361 }
362 acpi_device_dir(device)->owner = THIS_MODULE;
363
364 /* 'info' [R] */
365 entry = proc_create_data(ACPI_PROCESSOR_FILE_INFO,
366 S_IRUGO, acpi_device_dir(device),
367 &acpi_processor_info_fops,
368 acpi_driver_data(device));
369 if (!entry)
370 return -EIO;
371
372 /* 'throttling' [R/W] */
373 entry = proc_create_data(ACPI_PROCESSOR_FILE_THROTTLING,
374 S_IFREG | S_IRUGO | S_IWUSR,
375 acpi_device_dir(device),
376 &acpi_processor_throttling_fops,
377 acpi_driver_data(device));
378 if (!entry)
379 return -EIO;
380
381 /* 'limit' [R/W] */
382 entry = proc_create_data(ACPI_PROCESSOR_FILE_LIMIT,
383 S_IFREG | S_IRUGO | S_IWUSR,
384 acpi_device_dir(device),
385 &acpi_processor_limit_fops,
386 acpi_driver_data(device));
387 if (!entry)
388 return -EIO;
389 return 0;
390 }
391
392 static int acpi_processor_remove_fs(struct acpi_device *device)
393 {
394
395 if (acpi_device_dir(device)) {
396 remove_proc_entry(ACPI_PROCESSOR_FILE_INFO,
397 acpi_device_dir(device));
398 remove_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING,
399 acpi_device_dir(device));
400 remove_proc_entry(ACPI_PROCESSOR_FILE_LIMIT,
401 acpi_device_dir(device));
402 remove_proc_entry(acpi_device_bid(device), acpi_processor_dir);
403 acpi_device_dir(device) = NULL;
404 }
405
406 return 0;
407 }
408
409 /* Use the ACPI id in the MADT to map CPUs in the SMP case */
410
411 #ifndef CONFIG_SMP
412 static int get_cpu_id(acpi_handle handle, u32 acpi_id) {return -1;}
413 #else
414
415 static struct acpi_table_madt *madt;
416
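/*
 * Match one LOCAL_APIC MADT subtable: the entry must be enabled and its
 * processor_id must equal the ACPI id we are looking for; on success the
 * APIC id is returned through *apic_id.
 */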
417 static int map_lapic_id(struct acpi_subtable_header *entry,
418 u32 acpi_id, int *apic_id)
419 {
420 struct acpi_madt_local_apic *lapic =
421 (struct acpi_madt_local_apic *)entry;
422 if ((lapic->lapic_flags & ACPI_MADT_ENABLED) &&
423 lapic->processor_id == acpi_id) {
424 *apic_id = lapic->id;
425 return 1;
426 }
427 return 0;
428 }
429
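/*
 * Match one LOCAL_SAPIC MADT subtable: for enabled entries, compare the
 * processor_id first and fall back to the optional uid field (only
 * present when the entry is long enough).
 */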
430 static int map_lsapic_id(struct acpi_subtable_header *entry,
431 u32 acpi_id, int *apic_id)
432 {
433 struct acpi_madt_local_sapic *lsapic =
434 (struct acpi_madt_local_sapic *)entry;
435 /* Only check enabled APICs */
436 if (lsapic->lapic_flags & ACPI_MADT_ENABLED) {
437 /* First check against id */
438 if (lsapic->processor_id == acpi_id) {
439 *apic_id = (lsapic->id << 8) | lsapic->eid;
440 return 1;
441 /* Check against optional uid */
442 } else if (entry->length >= 16 &&
443 lsapic->uid == acpi_id) {
444 *apic_id = lsapic->uid;
445 return 1;
446 }
447 }
448 return 0;
449 }
450
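/*
 * Scan the static MADT: walk all subtables and return the APIC id of the
 * LOCAL_APIC or LOCAL_SAPIC entry matching the given ACPI processor id,
 * or -1 if there is no match.
 */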
451 static int map_madt_entry(u32 acpi_id)
452 {
453 unsigned long madt_end, entry;
454 int apic_id = -1;
455
456 if (!madt)
457 return apic_id;
458
459 entry = (unsigned long)madt;
460 madt_end = entry + madt->header.length;
461
462 /* Parse all entries looking for a match. */
463
464 entry += sizeof(struct acpi_table_madt);
465 while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
466 struct acpi_subtable_header *header =
467 (struct acpi_subtable_header *)entry;
468 if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
469 if (map_lapic_id(header, acpi_id, &apic_id))
470 break;
471 } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
472 if (map_lsapic_id(header, acpi_id, &apic_id))
473 break;
474 }
475 entry += header->length;
476 }
477 return apic_id;
478 }
479
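/*
 * Try the processor's _MAT method: it returns a buffer holding a single
 * MADT subtable for this object, which saves scanning the whole MADT.
 */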
480 static int map_mat_entry(acpi_handle handle, u32 acpi_id)
481 {
482 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
483 union acpi_object *obj;
484 struct acpi_subtable_header *header;
485 int apic_id = -1;
486
487 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
488 goto exit;
489
490 if (!buffer.length || !buffer.pointer)
491 goto exit;
492
493 obj = buffer.pointer;
494 if (obj->type != ACPI_TYPE_BUFFER ||
495 obj->buffer.length < sizeof(struct acpi_subtable_header)) {
496 goto exit;
497 }
498
499 header = (struct acpi_subtable_header *)obj->buffer.pointer;
500 if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
501 map_lapic_id(header, acpi_id, &apic_id);
502 } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
503 map_lsapic_id(header, acpi_id, &apic_id);
504 }
505
506 exit:
507 if (buffer.pointer)
508 kfree(buffer.pointer);
509 return apic_id;
510 }
511
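/*
 * Map an ACPI processor id to a logical CPU number: resolve it to an
 * APIC id via _MAT or the MADT, then search the possible CPUs for a
 * matching physical id.
 */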
512 static int get_cpu_id(acpi_handle handle, u32 acpi_id)
513 {
514 int i;
515 int apic_id = -1;
516
517 apic_id = map_mat_entry(handle, acpi_id);
518 if (apic_id == -1)
519 apic_id = map_madt_entry(acpi_id);
520 if (apic_id == -1)
521 return apic_id;
522
523 for_each_possible_cpu(i) {
524 if (cpu_physical_id(i) == apic_id)
525 return i;
526 }
527 return -1;
528 }
529 #endif
530
531 /* --------------------------------------------------------------------------
532 Driver Interface
533 -------------------------------------------------------------------------- */
534
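/*
 * Gather everything we need to know about one processor: apply chipset
 * errata, check for bus-master arbitration control, read the ACPI id
 * (from _UID for Device objects, from the Processor object otherwise),
 * map it to a logical CPU, and record the P_BLK throttling registers.
 */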
535 static int acpi_processor_get_info(struct acpi_processor *pr, unsigned has_uid)
536 {
537 acpi_status status = 0;
538 union acpi_object object = { 0 };
539 struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
540 int cpu_index;
541 static int cpu0_initialized;
542
543
544 if (!pr)
545 return -EINVAL;
546
547 if (num_online_cpus() > 1)
548 errata.smp = TRUE;
549
550 acpi_processor_errata(pr);
551
552 /*
553 * Check to see if we have bus mastering arbitration control. This
554 * is required for proper C3 usage (to maintain cache coherency).
555 */
556 if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
557 pr->flags.bm_control = 1;
558 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
559 "Bus mastering arbitration control present\n"));
560 } else
561 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
562 "No bus mastering arbitration control\n"));
563
564 /* Check if it is a Device with HID and UID */
565 if (has_uid) {
566 unsigned long value;
567 status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
568 NULL, &value);
569 if (ACPI_FAILURE(status)) {
570 printk(KERN_ERR PREFIX "Evaluating processor _UID\n");
571 return -ENODEV;
572 }
573 pr->acpi_id = value;
574 } else {
575 /*
576 * Evaluate the processor object. Note that it is common on SMP to
577 * have the first (boot) processor with a valid PBLK address while
578 * all others have a NULL address.
579 */
580 status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
581 if (ACPI_FAILURE(status)) {
582 printk(KERN_ERR PREFIX "Evaluating processor object\n");
583 return -ENODEV;
584 }
585
586 /*
587 * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
588 * >>> 'acpi_get_processor_id(acpi_id, &id)' in arch/xxx/acpi.c
589 */
590 pr->acpi_id = object.processor.proc_id;
591 }
592 cpu_index = get_cpu_id(pr->handle, pr->acpi_id);
593
594 /* Handle UP system running SMP kernel, with no LAPIC in MADT */
595 if (!cpu0_initialized && (cpu_index == -1) &&
596 (num_online_cpus() == 1)) {
597 cpu_index = 0;
598 }
599
600 cpu0_initialized = 1;
601
602 pr->id = cpu_index;
603
604 /*
605 * Extra Processor objects may be enumerated on MP systems with
606 * less than the max # of CPUs. They should be ignored _iff
607 * they are physically not present.
608 */
609 if (pr->id == -1) {
610 if (ACPI_FAILURE
611 (acpi_processor_hotadd_init(pr->handle, &pr->id))) {
612 return -ENODEV;
613 }
614 }
615
616 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id,
617 pr->acpi_id));
618
619 if (!object.processor.pblk_address)
620 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n"));
621 else if (object.processor.pblk_length != 6)
622 printk(KERN_ERR PREFIX "Invalid PBLK length [%d]\n",
623 object.processor.pblk_length);
624 else {
625 pr->throttling.address = object.processor.pblk_address;
626 pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
627 pr->throttling.duty_width = acpi_gbl_FADT.duty_width;
628
629 pr->pblk = object.processor.pblk_address;
630
631 /*
632 * We don't care about error returns - we just try to mark
633 * these reserved so that nobody else is confused into thinking
634 * that this region might be unused..
635 *
636 * (In particular, allocating the IO range for Cardbus)
637 */
638 request_region(pr->throttling.address, 6, "ACPI CPU throttle");
639 }
640
641 /*
642 * If ACPI describes a slot number for this CPU, we can use it to
643 * ensure we get the right value in the "physical id" field
644 * of /proc/cpuinfo.
645 */
646 status = acpi_evaluate_object(pr->handle, "_SUN", NULL, &buffer);
647 if (ACPI_SUCCESS(status))
648 arch_fix_phys_package_id(pr->id, object.integer.value);
649
650 return 0;
651 }
652
653 static DEFINE_PER_CPU(void *, processor_device_array);
654
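/*
 * Second-stage initialization: bind the ACPI device to its logical CPU,
 * create the /proc entries and sysfs links, install the notify handler,
 * evaluate _PDC, and set up performance, throttling, power (C-state) and
 * thermal-cooling support.
 */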
655 static int __cpuinit acpi_processor_start(struct acpi_device *device)
656 {
657 int result = 0;
658 acpi_status status = AE_OK;
659 struct acpi_processor *pr;
660 struct sys_device *sysdev;
661
662 pr = acpi_driver_data(device);
663
664 result = acpi_processor_get_info(pr, device->flags.unique_id);
665 if (result) {
666 /* Processor is physically not present */
667 return 0;
668 }
669
670 BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
671
672 /*
673 * Buggy BIOS check
674 * ACPI id of processors can be reported wrongly by the BIOS.
675 * Don't trust it blindly
676 */
677 if (per_cpu(processor_device_array, pr->id) != NULL &&
678 per_cpu(processor_device_array, pr->id) != device) {
679 printk(KERN_WARNING "BIOS reported wrong ACPI id "
680 "for the processor\n");
681 return -ENODEV;
682 }
683 per_cpu(processor_device_array, pr->id) = device;
684
685 per_cpu(processors, pr->id) = pr;
686
687 result = acpi_processor_add_fs(device);
688 if (result)
689 goto end;
690
691 sysdev = get_cpu_sysdev(pr->id);
692 if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev"))
693 return -EFAULT;
694
695 status = acpi_install_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
696 acpi_processor_notify, pr);
697
698 /* The _PDC call should be done before doing anything else (if required). */
699 arch_acpi_processor_init_pdc(pr);
700 acpi_processor_set_pdc(pr);
701 #ifdef CONFIG_CPU_FREQ
702 acpi_processor_ppc_has_changed(pr);
703 #endif
704 acpi_processor_get_throttling_info(pr);
705 acpi_processor_get_limit_info(pr);
706
707
708 acpi_processor_power_init(pr, device);
709
710 pr->cdev = thermal_cooling_device_register("Processor", device,
711 &processor_cooling_ops);
712 if (IS_ERR(pr->cdev)) {
713 result = PTR_ERR(pr->cdev);
714 goto end;
715 }
716
717 printk(KERN_INFO PREFIX
718 "%s is registered as cooling_device%d\n",
719 device->dev.bus_id, pr->cdev->id);
720
721 result = sysfs_create_link(&device->dev.kobj,
722 &pr->cdev->device.kobj,
723 "thermal_cooling");
724 if (result)
725 printk(KERN_ERR PREFIX "Create sysfs link\n");
726 result = sysfs_create_link(&pr->cdev->device.kobj,
727 &device->dev.kobj,
728 "device");
729 if (result)
730 printk(KERN_ERR PREFIX "Create sysfs link\n");
731
732 if (pr->flags.throttling) {
733 printk(KERN_INFO PREFIX "%s [%s] (supports",
734 acpi_device_name(device), acpi_device_bid(device));
735 printk(" %d throttling states", pr->throttling.state_count);
736 printk(")\n");
737 }
738
739 end:
740
741 return result;
742 }
743
744 static void acpi_processor_notify(acpi_handle handle, u32 event, void *data)
745 {
746 struct acpi_processor *pr = data;
747 struct acpi_device *device = NULL;
748 int saved;
749
750 if (!pr)
751 return;
752
753 if (acpi_bus_get_device(pr->handle, &device))
754 return;
755
756 switch (event) {
757 case ACPI_PROCESSOR_NOTIFY_PERFORMANCE:
758 saved = pr->performance_platform_limit;
759 acpi_processor_ppc_has_changed(pr);
760 if (saved == pr->performance_platform_limit)
761 break;
762 acpi_bus_generate_proc_event(device, event,
763 pr->performance_platform_limit);
764 acpi_bus_generate_netlink_event(device->pnp.device_class,
765 device->dev.bus_id, event,
766 pr->performance_platform_limit);
767 break;
768 case ACPI_PROCESSOR_NOTIFY_POWER:
769 acpi_processor_cst_has_changed(pr);
770 acpi_bus_generate_proc_event(device, event, 0);
771 acpi_bus_generate_netlink_event(device->pnp.device_class,
772 device->dev.bus_id, event, 0);
773 break;
774 case ACPI_PROCESSOR_NOTIFY_THROTTLING:
775 acpi_processor_tstate_has_changed(pr);
776 acpi_bus_generate_proc_event(device, event, 0);
777 acpi_bus_generate_netlink_event(device->pnp.device_class,
778 device->dev.bus_id, event, 0);
break;
779 default:
780 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
781 "Unsupported event [0x%x]\n", event));
782 break;
783 }
784
785 return;
786 }
787
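/*
 * CPU hotplug notifier: when a CPU comes back online, re-evaluate its
 * performance limit (_PPC), C-state (_CST) and throttling information.
 */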
788 static int acpi_cpu_soft_notify(struct notifier_block *nfb,
789 unsigned long action, void *hcpu)
790 {
791 unsigned int cpu = (unsigned long)hcpu;
792 struct acpi_processor *pr = per_cpu(processors, cpu);
793
794 if (action == CPU_ONLINE && pr) {
795 acpi_processor_ppc_has_changed(pr);
796 acpi_processor_cst_has_changed(pr);
797 acpi_processor_tstate_has_changed(pr);
798 }
799 return NOTIFY_OK;
800 }
801
802 static struct notifier_block acpi_cpu_notifier =
803 {
804 .notifier_call = acpi_cpu_soft_notify,
805 };
806
807 static int acpi_processor_add(struct acpi_device *device)
808 {
809 struct acpi_processor *pr = NULL;
810
811
812 if (!device)
813 return -EINVAL;
814
815 pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
816 if (!pr)
817 return -ENOMEM;
818
819 pr->handle = device->handle;
820 strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
821 strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
822 acpi_driver_data(device) = pr;
823
824 return 0;
825 }
826
827 static int acpi_processor_remove(struct acpi_device *device, int type)
828 {
829 acpi_status status = AE_OK;
830 struct acpi_processor *pr = NULL;
831
832
833 if (!device || !acpi_driver_data(device))
834 return -EINVAL;
835
836 pr = acpi_driver_data(device);
837
838 if (pr->id >= nr_cpu_ids) {
839 kfree(pr);
840 return 0;
841 }
842
843 if (type == ACPI_BUS_REMOVAL_EJECT) {
844 if (acpi_processor_handle_eject(pr))
845 return -EINVAL;
846 }
847
848 acpi_processor_power_exit(pr, device);
849
850 status = acpi_remove_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
851 acpi_processor_notify);
852
853 sysfs_remove_link(&device->dev.kobj, "sysdev");
854
855 acpi_processor_remove_fs(device);
856
857 if (pr->cdev) {
858 sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
859 sysfs_remove_link(&pr->cdev->device.kobj, "device");
860 thermal_cooling_device_unregister(pr->cdev);
861 pr->cdev = NULL;
862 }
863
864 per_cpu(processors, pr->id) = NULL;
865 per_cpu(processor_device_array, pr->id) = NULL;
866 kfree(pr);
867
868 return 0;
869 }
870
871 #ifdef CONFIG_ACPI_HOTPLUG_CPU
872 /****************************************************************************
873 * ACPI processor hotplug support *
874 ****************************************************************************/
875
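/*
 * Evaluate _STA to determine whether the processor is physically
 * present; a processor that supports hot plug must implement _STA.
 */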
876 static int is_processor_present(acpi_handle handle)
877 {
878 acpi_status status;
879 unsigned long sta = 0;
880
881
882 status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
883
884 if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_PRESENT))
885 return 1;
886
887 /*
888 * _STA is mandatory for a processor that supports hot plug
889 */
890 if (status == AE_NOT_FOUND)
891 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
892 "Processor does not support hot plug\n"));
893 else
894 ACPI_EXCEPTION((AE_INFO, status,
895 "Processor Device is not present"));
896 return 0;
897 }
898
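/*
 * Hot-add helper: create the ACPI device for a newly added processor
 * under its parent, start it so this driver binds to it, and send an
 * ONLINE uevent if it maps to a valid CPU.
 */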
899 static
900 int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device)
901 {
902 acpi_handle phandle;
903 struct acpi_device *pdev;
904 struct acpi_processor *pr;
905
906
907 if (acpi_get_parent(handle, &phandle)) {
908 return -ENODEV;
909 }
910
911 if (acpi_bus_get_device(phandle, &pdev)) {
912 return -ENODEV;
913 }
914
915 if (acpi_bus_add(device, pdev, handle, ACPI_BUS_TYPE_PROCESSOR)) {
916 return -ENODEV;
917 }
918
919 acpi_bus_start(*device);
920
921 pr = acpi_driver_data(*device);
922 if (!pr)
923 return -ENODEV;
924
925 if ((pr->id >= 0) && (pr->id < nr_cpu_ids)) {
926 kobject_uevent(&(*device)->dev.kobj, KOBJ_ONLINE);
927 }
928 return 0;
929 }
930
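/*
 * System notify handler for processor hotplug: BUS_CHECK/DEVICE_CHECK
 * events make sure the processor device exists and is started and notify
 * user space via uevents; EJECT_REQUEST tells user space (KOBJ_OFFLINE)
 * that the processor is about to go away.
 */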
931 static void __ref acpi_processor_hotplug_notify(acpi_handle handle,
932 u32 event, void *data)
933 {
934 struct acpi_processor *pr;
935 struct acpi_device *device = NULL;
936 int result;
937
938
939 switch (event) {
940 case ACPI_NOTIFY_BUS_CHECK:
941 case ACPI_NOTIFY_DEVICE_CHECK:
942 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
943 "Processor driver received %s event\n",
944 (event == ACPI_NOTIFY_BUS_CHECK) ?
945 "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK"));
946
947 if (!is_processor_present(handle))
948 break;
949
950 if (acpi_bus_get_device(handle, &device)) {
951 result = acpi_processor_device_add(handle, &device);
952 if (result)
953 printk(KERN_ERR PREFIX
954 "Unable to add the device\n");
955 break;
956 }
957
958 pr = acpi_driver_data(device);
959 if (!pr) {
960 printk(KERN_ERR PREFIX "Driver data is NULL\n");
961 break;
962 }
963
964 if (pr->id >= 0 && (pr->id < nr_cpu_ids)) {
965 kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
966 break;
967 }
968
969 result = acpi_processor_start(device);
970 if ((!result) && ((pr->id >= 0) && (pr->id < nr_cpu_ids))) {
971 kobject_uevent(&device->dev.kobj, KOBJ_ONLINE);
972 } else {
973 printk(KERN_ERR PREFIX "Device [%s] failed to start\n",
974 acpi_device_bid(device));
975 }
976 break;
977 case ACPI_NOTIFY_EJECT_REQUEST:
978 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
979 "received ACPI_NOTIFY_EJECT_REQUEST\n"));
980
981 if (acpi_bus_get_device(handle, &device)) {
982 printk(KERN_ERR PREFIX
983 "Device don't exist, dropping EJECT\n");
984 break;
985 }
986 pr = acpi_driver_data(device);
987 if (!pr) {
988 printk(KERN_ERR PREFIX
989 "Driver data is NULL, dropping EJECT\n");
990 return;
991 }
992
993 if ((pr->id < nr_cpu_ids) && (cpu_present(pr->id)))
994 kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
995 break;
996 default:
997 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
998 "Unsupported event [0x%x]\n", event));
999 break;
1000 }
1001
1002 return;
1003 }
1004
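/*
 * Namespace walk callback: install or remove the hotplug notify handler
 * on every Processor object, depending on the action passed in through
 * 'context'.
 */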
1005 static acpi_status
1006 processor_walk_namespace_cb(acpi_handle handle,
1007 u32 lvl, void *context, void **rv)
1008 {
1009 acpi_status status;
1010 int *action = context;
1011 acpi_object_type type = 0;
1012
1013 status = acpi_get_type(handle, &type);
1014 if (ACPI_FAILURE(status))
1015 return (AE_OK);
1016
1017 if (type != ACPI_TYPE_PROCESSOR)
1018 return (AE_OK);
1019
1020 switch (*action) {
1021 case INSTALL_NOTIFY_HANDLER:
1022 acpi_install_notify_handler(handle,
1023 ACPI_SYSTEM_NOTIFY,
1024 acpi_processor_hotplug_notify,
1025 NULL);
1026 break;
1027 case UNINSTALL_NOTIFY_HANDLER:
1028 acpi_remove_notify_handler(handle,
1029 ACPI_SYSTEM_NOTIFY,
1030 acpi_processor_hotplug_notify);
1031 break;
1032 default:
1033 break;
1034 }
1035
1036 return (AE_OK);
1037 }
1038
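/*
 * Hot-add initialization: check that the processor is physically
 * present, map its local (S)APIC to a logical CPU id and register that
 * CPU with the architecture code; undo the mapping if registration
 * fails.
 */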
1039 static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
1040 {
1041
1042 if (!is_processor_present(handle)) {
1043 return AE_ERROR;
1044 }
1045
1046 if (acpi_map_lsapic(handle, p_cpu))
1047 return AE_ERROR;
1048
1049 if (arch_register_cpu(*p_cpu)) {
1050 acpi_unmap_lsapic(*p_cpu);
1051 return AE_ERROR;
1052 }
1053
1054 return AE_OK;
1055 }
1056
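/*
 * Eject handling: bring the CPU down if it is still online, then
 * unregister it and remove its local (S)APIC mapping.
 */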
1057 static int acpi_processor_handle_eject(struct acpi_processor *pr)
1058 {
1059 if (cpu_online(pr->id))
1060 cpu_down(pr->id);
1061
1062 arch_unregister_cpu(pr->id);
1063 acpi_unmap_lsapic(pr->id);
1064 return (0);
1065 }
1066 #else
1067 static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
1068 {
1069 return AE_ERROR;
1070 }
1071 static int acpi_processor_handle_eject(struct acpi_processor *pr)
1072 {
1073 return (-EINVAL);
1074 }
1075 #endif
1076
1077 static
1078 void acpi_processor_install_hotplug_notify(void)
1079 {
1080 #ifdef CONFIG_ACPI_HOTPLUG_CPU
1081 int action = INSTALL_NOTIFY_HANDLER;
1082 acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
1083 ACPI_ROOT_OBJECT,
1084 ACPI_UINT32_MAX,
1085 processor_walk_namespace_cb, &action, NULL);
1086 #endif
1087 register_hotcpu_notifier(&acpi_cpu_notifier);
1088 }
1089
1090 static
1091 void acpi_processor_uninstall_hotplug_notify(void)
1092 {
1093 #ifdef CONFIG_ACPI_HOTPLUG_CPU
1094 int action = UNINSTALL_NOTIFY_HANDLER;
1095 acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
1096 ACPI_ROOT_OBJECT,
1097 ACPI_UINT32_MAX,
1098 processor_walk_namespace_cb, &action, NULL);
1099 #endif
1100 unregister_hotcpu_notifier(&acpi_cpu_notifier);
1101 }
1102
1103 /*
1104 * We keep the driver loaded even when ACPI is not running.
1105 * This is needed for the powernow-k8 driver, which works even without
1106 * ACPI but needs symbols from this driver.
1107 */
1108
1109 static int __init acpi_processor_init(void)
1110 {
1111 int result = 0;
1112
1113 memset(&errata, 0, sizeof(errata));
1114
1115 #ifdef CONFIG_SMP
1116 if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
1117 (struct acpi_table_header **)&madt)))
1118 madt = NULL;
1119 #endif
1120
1121 acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
1122 if (!acpi_processor_dir)
1123 return -ENOMEM;
1124 acpi_processor_dir->owner = THIS_MODULE;
1125
1126 /*
1127 * Check whether the system matches an entry in the DMI table above.
1128 * If so, OSPM should not use mwait for CPU C-states.
1129 */
1130 dmi_check_system(processor_idle_dmi_table);
1131 result = cpuidle_register_driver(&acpi_idle_driver);
1132 if (result < 0)
1133 goto out_proc;
1134
1135 result = acpi_bus_register_driver(&acpi_processor_driver);
1136 if (result < 0)
1137 goto out_cpuidle;
1138
1139 acpi_processor_install_hotplug_notify();
1140
1141 acpi_thermal_cpufreq_init();
1142
1143 acpi_processor_ppc_init();
1144
1145 acpi_processor_throttling_init();
1146
1147 return 0;
1148
1149 out_cpuidle:
1150 cpuidle_unregister_driver(&acpi_idle_driver);
1151
1152 out_proc:
1153 remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
1154
1155 return result;
1156 }
1157
1158 static void __exit acpi_processor_exit(void)
1159 {
1160 acpi_processor_ppc_exit();
1161
1162 acpi_thermal_cpufreq_exit();
1163
1164 acpi_processor_uninstall_hotplug_notify();
1165
1166 acpi_bus_unregister_driver(&acpi_processor_driver);
1167
1168 cpuidle_unregister_driver(&acpi_idle_driver);
1169
1170 remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
1171
1172 return;
1173 }
1174
1175 module_init(acpi_processor_init);
1176 module_exit(acpi_processor_exit);
1177
1178 EXPORT_SYMBOL(acpi_processor_set_thermal_limit);
1179
1180 MODULE_ALIAS("processor");