hwmon: (coretemp) Fixup platform device ID change
drivers/hwmon/coretemp.c
/*
 * coretemp.c - Linux kernel module for hardware monitoring
 *
 * Copyright (C) 2007 Rudolf Marek <r.marek@assembler.cz>
 *
 * Inspired from many hwmon drivers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301 USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hwmon.h>
#include <linux/sysfs.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/cpu.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/moduleparam.h>
#include <asm/msr.h>
#include <asm/processor.h>

#define DRVNAME "coretemp"

/*
 * force_tjmax only matters when TjMax can't be read from the CPU itself.
 * When set, it replaces the driver's suboptimal heuristic.
 */
static int force_tjmax;
module_param_named(tjmax, force_tjmax, int, 0444);
MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
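
/*
 * Illustrative usage only: the override can be supplied at load time, e.g.
 *
 *	modprobe coretemp tjmax=95
 *
 * which reports TjMax as 95 degrees C whenever it cannot be read from the
 * CPU itself.
 */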

#define BASE_SYSFS_ATTR_NO	2	/* Sysfs Base attr no for coretemp */
#define NUM_REAL_CORES		16	/* Number of Real cores per cpu */
#define CORETEMP_NAME_LENGTH	17	/* String Length of attrs */
#define MAX_CORE_ATTRS		4	/* Maximum no of basic attrs */
#define TOTAL_ATTRS		(MAX_CORE_ATTRS + 1)
#define MAX_CORE_DATA		(NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)

#ifdef CONFIG_SMP
#define TO_PHYS_ID(cpu)		cpu_data(cpu).phys_proc_id
#define TO_CORE_ID(cpu)		cpu_data(cpu).cpu_core_id
#define TO_ATTR_NO(cpu)		(TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
#define for_each_sibling(i, cpu)	for_each_cpu(i, cpu_sibling_mask(cpu))
#else
#define TO_PHYS_ID(cpu)		(cpu)
#define TO_CORE_ID(cpu)		(cpu)
#define TO_ATTR_NO(cpu)		(cpu)
#define for_each_sibling(i, cpu)	for (i = 0; false; )
#endif
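
/*
 * Note on the mapping above (a worked example, not normative): on an SMP
 * kernel TO_ATTR_NO() is the core ID plus BASE_SYSFS_ATTR_NO, so a core
 * with cpu_core_id 3 is exported as the temp5_* attribute group, while
 * attribute number 1 is reserved for package temperature data. On a UP
 * kernel the CPU number itself is used and for_each_sibling() degenerates
 * to an empty loop.
 */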

/*
 * Per-Core Temperature Data
 * @last_updated: The time (in jiffies) when the current temperature value
 *		was last updated.
 * @cpu_core_id: The CPU Core from which temperature values should be read.
 *		This value is passed as "id" field to rdmsr/wrmsr functions.
 * @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS,
 *		from where the temperature values should be read.
 * @attr_size: Total number of per-core attrs displayed in the sysfs.
 * @is_pkg_data: If this is 1, the temp_data holds pkgtemp data.
 *		Otherwise, temp_data holds coretemp data.
 * @valid: If this is 1, the current temperature is valid.
 */
struct temp_data {
	int temp;
	int ttarget;
	int tjmax;
	unsigned long last_updated;
	unsigned int cpu;
	u32 cpu_core_id;
	u32 status_reg;
	int attr_size;
	bool is_pkg_data;
	bool valid;
	struct sensor_device_attribute sd_attrs[TOTAL_ATTRS];
	char attr_name[TOTAL_ATTRS][CORETEMP_NAME_LENGTH];
	struct mutex update_lock;
};

/* Platform Data per Physical CPU */
struct platform_data {
	struct device *hwmon_dev;
	u16 phys_proc_id;
	struct temp_data *core_data[MAX_CORE_DATA];
	struct device_attribute name_attr;
};

struct pdev_entry {
	struct list_head list;
	struct platform_device *pdev;
	u16 phys_proc_id;
};

static LIST_HEAD(pdev_list);
static DEFINE_MUTEX(pdev_list_mutex);

static ssize_t show_name(struct device *dev,
			struct device_attribute *devattr, char *buf)
{
	return sprintf(buf, "%s\n", DRVNAME);
}

static ssize_t show_label(struct device *dev,
			struct device_attribute *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct platform_data *pdata = dev_get_drvdata(dev);
	struct temp_data *tdata = pdata->core_data[attr->index];

	if (tdata->is_pkg_data)
		return sprintf(buf, "Physical id %u\n", pdata->phys_proc_id);

	return sprintf(buf, "Core %u\n", tdata->cpu_core_id);
}

static ssize_t show_crit_alarm(struct device *dev,
			struct device_attribute *devattr, char *buf)
{
	u32 eax, edx;
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct platform_data *pdata = dev_get_drvdata(dev);
	struct temp_data *tdata = pdata->core_data[attr->index];

	rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);

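	/*
	 * Bit 5 of both IA32_THERM_STATUS and IA32_PACKAGE_THERM_STATUS is,
	 * per the Intel SDM, the sticky critical-temperature log bit, so
	 * this reports whether a critical trip has been observed since the
	 * bit was last cleared.
	 */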
	return sprintf(buf, "%d\n", (eax >> 5) & 1);
}

static ssize_t show_tjmax(struct device *dev,
			struct device_attribute *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct platform_data *pdata = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", pdata->core_data[attr->index]->tjmax);
}

static ssize_t show_ttarget(struct device *dev,
			struct device_attribute *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct platform_data *pdata = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", pdata->core_data[attr->index]->ttarget);
}

static ssize_t show_temp(struct device *dev,
			struct device_attribute *devattr, char *buf)
{
	u32 eax, edx;
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct platform_data *pdata = dev_get_drvdata(dev);
	struct temp_data *tdata = pdata->core_data[attr->index];

	mutex_lock(&tdata->update_lock);

	/* Check whether the time interval has elapsed */
	if (!tdata->valid || time_after(jiffies, tdata->last_updated + HZ)) {
		rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
		tdata->valid = 0;
		/* Check whether the data is valid */
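		/*
		 * Per the Intel SDM, bit 31 of the thermal status MSR is the
		 * "Reading Valid" flag and bits 22:16 hold the digital
		 * readout, i.e. the offset below TjMax in degrees C; hence
		 * temp = tjmax - readout * 1000 millidegrees below.
		 */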
		if (eax & 0x80000000) {
			tdata->temp = tdata->tjmax -
					((eax >> 16) & 0x7f) * 1000;
			tdata->valid = 1;
		}
		tdata->last_updated = jiffies;
	}

	mutex_unlock(&tdata->update_lock);
	return tdata->valid ? sprintf(buf, "%d\n", tdata->temp) : -EAGAIN;
}

static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
{
	/* 100 degrees C is the default for both mobile and non-mobile CPUs */

	int tjmax = 100000;
	int tjmax_ee = 85000;
	int usemsr_ee = 1;
	int err;
	u32 eax, edx;
	struct pci_dev *host_bridge;

	/* Early chips have no MSR for TjMax */

	if (c->x86_model == 0xf && c->x86_mask < 4)
		usemsr_ee = 0;

	/* Atom CPUs */

	if (c->x86_model == 0x1c) {
		usemsr_ee = 0;

		host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));

		if (host_bridge && host_bridge->vendor == PCI_VENDOR_ID_INTEL
		    && (host_bridge->device == 0xa000	/* NM10 based nettop */
		    || host_bridge->device == 0xa010))	/* NM10 based netbook */
			tjmax = 100000;
		else
			tjmax = 90000;

		pci_dev_put(host_bridge);
	}

	if (c->x86_model > 0xe && usemsr_ee) {
		u8 platform_id;

		/*
		 * Now we can detect the mobile CPU using the Intel provided
		 * table: http://softwarecommunity.intel.com/Wiki/Mobility/720.htm
		 * For Core2 cores, check MSR 0x17, bit 28: 1 = Mobile CPU
		 */
		err = rdmsr_safe_on_cpu(id, 0x17, &eax, &edx);
		if (err) {
			dev_warn(dev,
				 "Unable to access MSR 0x17, assuming desktop CPU\n");
			usemsr_ee = 0;
		} else if (c->x86_model < 0x17 && !(eax & 0x10000000)) {
			/*
			 * Trust bit 28 up to Penryn, I could not find any
			 * documentation on that; if you happen to know
			 * someone at Intel please ask
			 */
			usemsr_ee = 0;
		} else {
			/* Platform ID bits 52:50 (EDX starts at bit 32) */
			platform_id = (edx >> 18) & 0x7;

			/*
			 * Mobile Penryn CPU seems to be platform ID 7 or 5
			 * (guesswork)
			 */
			if (c->x86_model == 0x17 &&
			    (platform_id == 5 || platform_id == 7)) {
				/*
				 * If MSR EE bit is set, set it to 90 degrees C,
				 * otherwise 105 degrees C
				 */
				tjmax_ee = 90000;
				tjmax = 105000;
			}
		}
	}

	if (usemsr_ee) {
		err = rdmsr_safe_on_cpu(id, 0xee, &eax, &edx);
		if (err) {
			dev_warn(dev,
				 "Unable to access MSR 0xEE; TjMax left at default\n");
		} else if (eax & 0x40000000) {
			tjmax = tjmax_ee;
		}
	} else if (tjmax == 100000) {
		/*
		 * If we don't use the MSR EE bit, we are a desktop CPU
		 * (with the exception of Atom)
		 */
		dev_warn(dev, "Using relative temperature scale!\n");
	}

	return tjmax;
}

static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
{
	int err;
	u32 eax, edx;
	u32 val;

	/*
	 * On newer Intel(R) processors, the IA32_TEMPERATURE_TARGET MSR
	 * contains the TjMax value
	 */
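	/*
	 * Bits 23:16 of that MSR hold TjMax in degrees C, which is what the
	 * shift and 0xff mask below extract.
	 */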
	err = rdmsr_safe_on_cpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
	if (err) {
		if (c->x86_model > 0xe && c->x86_model != 0x1c)
			dev_warn(dev, "Unable to read TjMax from CPU %u\n", id);
	} else {
		val = (eax >> 16) & 0xff;
		/*
		 * If the TjMax is not plausible, an assumption
		 * will be used
		 */
		if (val) {
			dev_dbg(dev, "TjMax is %d degrees C\n", val);
			return val * 1000;
		}
	}

	if (force_tjmax) {
		dev_notice(dev, "TjMax forced to %d degrees C by user\n",
			   force_tjmax);
		return force_tjmax * 1000;
	}

	/*
	 * An assumption is made for early CPUs and unreadable MSR.
	 * NOTE: the calculated value may not be correct.
	 */
	return adjust_tjmax(c, id, dev);
}

static void __devinit get_ucode_rev_on_cpu(void *edx)
{
	u32 eax;

	wrmsr(MSR_IA32_UCODE_REV, 0, 0);
	sync_core();
	rdmsr(MSR_IA32_UCODE_REV, eax, *(u32 *)edx);
}

static int create_name_attr(struct platform_data *pdata, struct device *dev)
{
	sysfs_attr_init(&pdata->name_attr.attr);
	pdata->name_attr.attr.name = "name";
	pdata->name_attr.attr.mode = S_IRUGO;
	pdata->name_attr.show = show_name;
	return device_create_file(dev, &pdata->name_attr);
}

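/*
 * Create the per-sensor sysfs attributes. As an illustrative example of the
 * numbering scheme above: core 0 is exported as temp2_label,
 * temp2_crit_alarm, temp2_input, temp2_crit and, where the target
 * temperature is readable, temp2_max, while package temperature data (if
 * any) uses the temp1_* names.
 */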
static int create_core_attrs(struct temp_data *tdata, struct device *dev,
				int attr_no)
{
	int err, i;
	static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev,
			struct device_attribute *devattr, char *buf) = {
			show_label, show_crit_alarm, show_temp, show_tjmax,
			show_ttarget };
	static const char *const names[TOTAL_ATTRS] = {
					"temp%d_label", "temp%d_crit_alarm",
					"temp%d_input", "temp%d_crit",
					"temp%d_max" };

	for (i = 0; i < tdata->attr_size; i++) {
		snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i],
			 attr_no);
		sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);
		tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
		tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO;
		tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
		tdata->sd_attrs[i].index = attr_no;
		err = device_create_file(dev, &tdata->sd_attrs[i].dev_attr);
		if (err)
			goto exit_free;
	}
	return 0;

exit_free:
	while (--i >= 0)
		device_remove_file(dev, &tdata->sd_attrs[i].dev_attr);
	return err;
}

static int __cpuinit chk_ucode_version(unsigned int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	int err;
	u32 edx;

	/*
	 * Check if we have a problem with errata AE18 of Core processors:
	 * readings might stop updating when the processor visits too deep
	 * a sleep state; fixed for stepping D0 (6EC).
	 */
	if (c->x86_model == 0xe && c->x86_mask < 0xc) {
		/* check for microcode update */
		err = smp_call_function_single(cpu, get_ucode_rev_on_cpu,
					       &edx, 1);
		if (err) {
			pr_err("Cannot determine microcode revision of CPU#%u (%d)!\n",
			       cpu, err);
			return -ENODEV;
		} else if (edx < 0x39) {
			pr_err("Errata AE18 not fixed, update BIOS or microcode of the CPU!\n");
			return -ENODEV;
		}
	}
	return 0;
}

static struct platform_device *coretemp_get_pdev(unsigned int cpu)
{
	u16 phys_proc_id = TO_PHYS_ID(cpu);
	struct pdev_entry *p;

	mutex_lock(&pdev_list_mutex);

	list_for_each_entry(p, &pdev_list, list)
		if (p->phys_proc_id == phys_proc_id) {
			mutex_unlock(&pdev_list_mutex);
			return p->pdev;
		}

	mutex_unlock(&pdev_list_mutex);
	return NULL;
}

static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
{
	struct temp_data *tdata;

	tdata = kzalloc(sizeof(struct temp_data), GFP_KERNEL);
	if (!tdata)
		return NULL;

	tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS :
						MSR_IA32_THERM_STATUS;
	tdata->is_pkg_data = pkg_flag;
	tdata->cpu = cpu;
	tdata->cpu_core_id = TO_CORE_ID(cpu);
	tdata->attr_size = MAX_CORE_ATTRS;
	mutex_init(&tdata->update_lock);
	return tdata;
}

static int create_core_data(struct platform_device *pdev,
				unsigned int cpu, int pkg_flag)
{
	struct temp_data *tdata;
	struct platform_data *pdata = platform_get_drvdata(pdev);
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	u32 eax, edx;
	int err, attr_no;

	/*
	 * Find attr number for sysfs:
	 * We map the attr number to the core id of the CPU.
	 * The attr number is always core id + 2.
	 * The Pkgtemp will always show up as temp1_*, if available.
	 */
	attr_no = pkg_flag ? 1 : TO_ATTR_NO(cpu);

	if (attr_no > MAX_CORE_DATA - 1)
		return -ERANGE;

	/*
	 * Provide a single set of attributes for all HT siblings of a core
	 * to avoid duplicate sensors (the processor ID and core ID of all
	 * HT siblings of a core are the same).
	 * Skip if a HT sibling of this core is already registered.
	 * This is not an error.
	 */
	if (pdata->core_data[attr_no] != NULL)
		return 0;

	tdata = init_temp_data(cpu, pkg_flag);
	if (!tdata)
		return -ENOMEM;

	/* Test if we can access the status register */
	err = rdmsr_safe_on_cpu(cpu, tdata->status_reg, &eax, &edx);
	if (err)
		goto exit_free;

	/* We can access the status register. Get the critical temperature. */
	tdata->tjmax = get_tjmax(c, cpu, &pdev->dev);

	/*
	 * Read the still undocumented bits 8:15 of IA32_TEMPERATURE_TARGET.
	 * The target temperature is available on older CPUs but not in this
	 * register. Atoms don't have the register at all.
	 */
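	/*
	 * Illustrative arithmetic (not from documentation): with a TjMax of
	 * 100 degrees C and an offset of 10 in bits 8:15, ttarget below
	 * becomes 100000 - 10 * 1000 = 90000 millidegrees C, exported as
	 * temp%d_max.
	 */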
	if (c->x86_model > 0xe && c->x86_model != 0x1c) {
		err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET,
					&eax, &edx);
		if (!err) {
			tdata->ttarget
			  = tdata->tjmax - ((eax >> 8) & 0xff) * 1000;
			tdata->attr_size++;
		}
	}

	pdata->core_data[attr_no] = tdata;

	/* Create sysfs interfaces */
	err = create_core_attrs(tdata, &pdev->dev, attr_no);
	if (err)
		goto exit_free;

	return 0;
exit_free:
	kfree(tdata);
	return err;
}

static void coretemp_add_core(unsigned int cpu, int pkg_flag)
{
	struct platform_device *pdev = coretemp_get_pdev(cpu);
	int err;

	if (!pdev)
		return;

	err = create_core_data(pdev, cpu, pkg_flag);
	if (err)
		dev_err(&pdev->dev, "Adding Core %u failed\n", cpu);
}

static void coretemp_remove_core(struct platform_data *pdata,
				struct device *dev, int indx)
{
	int i;
	struct temp_data *tdata = pdata->core_data[indx];

	/* Remove the sysfs attributes */
	for (i = 0; i < tdata->attr_size; i++)
		device_remove_file(dev, &tdata->sd_attrs[i].dev_attr);

	kfree(pdata->core_data[indx]);
	pdata->core_data[indx] = NULL;
}

static int __devinit coretemp_probe(struct platform_device *pdev)
{
	struct platform_data *pdata;
	int err;

	/* Initialize the per-package data structures */
	pdata = kzalloc(sizeof(struct platform_data), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	err = create_name_attr(pdata, &pdev->dev);
	if (err)
		goto exit_free;

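	/*
	 * The platform device was registered with the physical package ID
	 * as its device ID (see coretemp_device_add()), so pdev->id can be
	 * used directly here.
	 */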
	pdata->phys_proc_id = pdev->id;
	platform_set_drvdata(pdev, pdata);

	pdata->hwmon_dev = hwmon_device_register(&pdev->dev);
	if (IS_ERR(pdata->hwmon_dev)) {
		err = PTR_ERR(pdata->hwmon_dev);
		dev_err(&pdev->dev, "Class registration failed (%d)\n", err);
		goto exit_name;
	}
	return 0;

exit_name:
	device_remove_file(&pdev->dev, &pdata->name_attr);
	platform_set_drvdata(pdev, NULL);
exit_free:
	kfree(pdata);
	return err;
}

static int __devexit coretemp_remove(struct platform_device *pdev)
{
	struct platform_data *pdata = platform_get_drvdata(pdev);
	int i;

	for (i = MAX_CORE_DATA - 1; i >= 0; --i)
		if (pdata->core_data[i])
			coretemp_remove_core(pdata, &pdev->dev, i);

	device_remove_file(&pdev->dev, &pdata->name_attr);
	hwmon_device_unregister(pdata->hwmon_dev);
	platform_set_drvdata(pdev, NULL);
	kfree(pdata);
	return 0;
}

static struct platform_driver coretemp_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = DRVNAME,
	},
	.probe = coretemp_probe,
	.remove = __devexit_p(coretemp_remove),
};

static int __cpuinit coretemp_device_add(unsigned int cpu)
{
	int err;
	struct platform_device *pdev;
	struct pdev_entry *pdev_entry;

	mutex_lock(&pdev_list_mutex);

	pdev = platform_device_alloc(DRVNAME, TO_PHYS_ID(cpu));
	if (!pdev) {
		err = -ENOMEM;
		pr_err("Device allocation failed\n");
		goto exit;
	}

	pdev_entry = kzalloc(sizeof(struct pdev_entry), GFP_KERNEL);
	if (!pdev_entry) {
		err = -ENOMEM;
		goto exit_device_put;
	}

	err = platform_device_add(pdev);
	if (err) {
		pr_err("Device addition failed (%d)\n", err);
		goto exit_device_free;
	}

	pdev_entry->pdev = pdev;
	pdev_entry->phys_proc_id = pdev->id;

	list_add_tail(&pdev_entry->list, &pdev_list);
	mutex_unlock(&pdev_list_mutex);

	return 0;

exit_device_free:
	kfree(pdev_entry);
exit_device_put:
	platform_device_put(pdev);
exit:
	mutex_unlock(&pdev_list_mutex);
	return err;
}

static void coretemp_device_remove(unsigned int cpu)
{
	struct pdev_entry *p, *n;
	u16 phys_proc_id = TO_PHYS_ID(cpu);

	mutex_lock(&pdev_list_mutex);
	list_for_each_entry_safe(p, n, &pdev_list, list) {
		if (p->phys_proc_id != phys_proc_id)
			continue;
		platform_device_unregister(p->pdev);
		list_del(&p->list);
		kfree(p);
	}
	mutex_unlock(&pdev_list_mutex);
}

static bool is_any_core_online(struct platform_data *pdata)
{
	int i;

	/* Find online cores, except pkgtemp data */
	for (i = MAX_CORE_DATA - 1; i >= 0; --i) {
		if (pdata->core_data[i] &&
			!pdata->core_data[i]->is_pkg_data) {
			return true;
		}
	}
	return false;
}

static void __cpuinit get_core_online(unsigned int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct platform_device *pdev = coretemp_get_pdev(cpu);
	int err;

	/*
	 * CPUID.06H.EAX[0] indicates whether the CPU has thermal
	 * sensors. We check this bit only; all the early CPUs
	 * without thermal sensors will be filtered out.
	 */
	if (!cpu_has(c, X86_FEATURE_DTS))
		return;

	if (!pdev) {
		/* Check the microcode version of the CPU */
		if (chk_ucode_version(cpu))
			return;

		/*
		 * Alright, we have DTS support.
		 * We are bringing the _first_ core in this pkg
		 * online. So, initialize per-pkg data structures and
		 * then bring this core online.
		 */
		err = coretemp_device_add(cpu);
		if (err)
			return;
		/*
		 * Check whether pkgtemp support is available.
		 * If so, add interfaces for pkgtemp.
		 */
		if (cpu_has(c, X86_FEATURE_PTS))
			coretemp_add_core(cpu, 1);
	}
	/*
	 * The physical CPU device exists by now (either it was already
	 * present or it was just created above), so add the interfaces
	 * for this core.
	 */
	coretemp_add_core(cpu, 0);
}

static void __cpuinit put_core_offline(unsigned int cpu)
{
	int i, indx;
	struct platform_data *pdata;
	struct platform_device *pdev = coretemp_get_pdev(cpu);

	/* If the physical CPU device does not exist, just return */
	if (!pdev)
		return;

	pdata = platform_get_drvdata(pdev);

	indx = TO_ATTR_NO(cpu);

	if (pdata->core_data[indx] && pdata->core_data[indx]->cpu == cpu)
		coretemp_remove_core(pdata, &pdev->dev, indx);

	/*
	 * If a HT sibling of a core is taken offline, but another HT sibling
	 * of the same core is still online, register the alternate sibling.
	 * This ensures that exactly one set of attributes is provided as long
	 * as at least one HT sibling of a core is online.
	 */
	for_each_sibling(i, cpu) {
		if (i != cpu) {
			get_core_online(i);
			/*
			 * Display temperature sensor data for one HT sibling
			 * per core only, so abort the loop after one such
			 * sibling has been found.
			 */
			break;
		}
	}
	/*
	 * If all cores in this pkg are offline, remove the device.
	 * coretemp_device_remove calls platform_device_unregister,
	 * which in turn calls coretemp_remove. This removes the
	 * pkgtemp entry and does other cleanups.
	 */
	if (!is_any_core_online(pdata))
		coretemp_device_remove(cpu);
}

static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long) hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		get_core_online(cpu);
		break;
	case CPU_DOWN_PREPARE:
		put_core_offline(cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block coretemp_cpu_notifier __refdata = {
	.notifier_call = coretemp_cpu_callback,
};

static int __init coretemp_init(void)
{
	int i, err = -ENODEV;

	/* quick check if we run Intel */
	if (cpu_data(0).x86_vendor != X86_VENDOR_INTEL)
		goto exit;

	err = platform_driver_register(&coretemp_driver);
	if (err)
		goto exit;

	for_each_online_cpu(i)
		get_core_online(i);

#ifndef CONFIG_HOTPLUG_CPU
	if (list_empty(&pdev_list)) {
		err = -ENODEV;
		goto exit_driver_unreg;
	}
#endif

	register_hotcpu_notifier(&coretemp_cpu_notifier);
	return 0;

#ifndef CONFIG_HOTPLUG_CPU
exit_driver_unreg:
	platform_driver_unregister(&coretemp_driver);
#endif
exit:
	return err;
}

static void __exit coretemp_exit(void)
{
	struct pdev_entry *p, *n;

	unregister_hotcpu_notifier(&coretemp_cpu_notifier);
	mutex_lock(&pdev_list_mutex);
	list_for_each_entry_safe(p, n, &pdev_list, list) {
		platform_device_unregister(p->pdev);
		list_del(&p->list);
		kfree(p);
	}
	mutex_unlock(&pdev_list_mutex);
	platform_driver_unregister(&coretemp_driver);
}

MODULE_AUTHOR("Rudolf Marek <r.marek@assembler.cz>");
MODULE_DESCRIPTION("Intel Core temperature monitor");
MODULE_LICENSE("GPL");

module_init(coretemp_init)
module_exit(coretemp_exit)