/*
 * Thermal throttle event support code (such as syslog messaging and rate
 * limiting) that was factored out from x86_64 (mce_intel.c) and i386 (p4.c).
 *
 * This allows consistent reporting of CPU thermal throttle events.
 *
 * Maintains a counter in /sys that keeps track of the number of thermal
 * events, such that the user knows how bad the thermal problem might be
 * (since the logging to syslog and mcelog is rate limited).
 *
 * Author: Dmitriy Zavin (dmitriyz@google.com)
 *
 * Credits: Adapted from Zwane Mwaikambo's original code in mce_intel.c.
 *          Inspired by Ross Biro's and Al Borchers' counter code.
 */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/trace/irq_vectors.h>
/* How long to wait between reporting thermal events */
#define CHECK_INTERVAL          (300 * HZ)

#define THERMAL_THROTTLING_EVENT        0
#define POWER_LIMIT_EVENT               1
/*
 * Current thermal event state:
 */
struct _thermal_state {
        bool            new_event;
        u64             next_check;
        unsigned long   count;
        unsigned long   last_count;
};
struct thermal_state {
        struct _thermal_state core_throttle;
        struct _thermal_state core_power_limit;
        struct _thermal_state package_throttle;
        struct _thermal_state package_power_limit;
        struct _thermal_state core_thresh0;
        struct _thermal_state core_thresh1;
        struct _thermal_state pkg_thresh0;
        struct _thermal_state pkg_thresh1;
};
/* Callback to handle core threshold interrupts */
int (*platform_thermal_notify)(__u64 msr_val);
EXPORT_SYMBOL(platform_thermal_notify);
/* Callback to handle package threshold interrupts */
int (*platform_thermal_package_notify)(__u64 msr_val);
EXPORT_SYMBOL_GPL(platform_thermal_package_notify);
/* Callback support of rate control: return true if the
 * callback implements its own rate control */
bool (*platform_thermal_package_rate_control)(void);
EXPORT_SYMBOL_GPL(platform_thermal_package_rate_control);
static DEFINE_PER_CPU(struct thermal_state, thermal_state);

static atomic_t therm_throt_en = ATOMIC_INIT(0);

static u32 lvtthmr_init __read_mostly;
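/*
 * The sysfs glue below exposes the per-CPU event counters as read-only
 * files (core_throttle_count, core_power_limit_count, etc.) in the
 * "thermal_throttle" group of each CPU device.
 */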
#ifdef CONFIG_SYSFS
#define define_therm_throt_device_one_ro(_name)                        \
        static DEVICE_ATTR(_name, 0444,                                \
                           therm_throt_device_show_##_name, NULL)
#define define_therm_throt_device_show_func(event, name)               \
static ssize_t therm_throt_device_show_##event##_##name(               \
                        struct device *dev,                            \
                        struct device_attribute *attr,                 \
                        char *buf)                                     \
{                                                                      \
        unsigned int cpu = dev->id;                                    \
        ssize_t ret;                                                   \
                                                                       \
        preempt_disable();      /* CPU hotplug */                      \
        if (cpu_online(cpu)) {                                         \
                ret = sprintf(buf, "%lu\n",                            \
                              per_cpu(thermal_state, cpu).event.name); \
        } else                                                         \
                ret = 0;                                               \
        preempt_enable();                                              \
                                                                       \
        return ret;                                                    \
}
define_therm_throt_device_show_func(core_throttle, count);
define_therm_throt_device_one_ro(core_throttle_count);

define_therm_throt_device_show_func(core_power_limit, count);
define_therm_throt_device_one_ro(core_power_limit_count);

define_therm_throt_device_show_func(package_throttle, count);
define_therm_throt_device_one_ro(package_throttle_count);

define_therm_throt_device_show_func(package_power_limit, count);
define_therm_throt_device_one_ro(package_power_limit_count);
static struct attribute *thermal_throttle_attrs[] = {
        &dev_attr_core_throttle_count.attr,
        NULL
};
static struct attribute_group thermal_attr_group = {
        .attrs  = thermal_throttle_attrs,
        .name   = "thermal_throttle"
};
#endif /* CONFIG_SYSFS */
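/* Throttling events are accounted at two levels: per core and per package. */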
#define CORE_LEVEL      0
#define PACKAGE_LEVEL   1
/**
 * therm_throt_process - Process thermal throttling event from interrupt
 * @new_event: Whether the condition is currently active (boolean), since the
 *             thermal interrupt normally gets called both when the thermal
 *             event begins and once the event has ended.
 *
 * This function is called by the thermal interrupt after the
 * IRQ has been acknowledged.
 *
 * It will take care of rate limiting and printing messages to the syslog.
 *
 * Returns: 0 : Event should NOT be further logged, i.e. still in
 *              "timeout" from previous log message.
 *          1 : Event should be logged further, and a message has been
 *              printed to the syslog.
 */
static int therm_throt_process(bool new_event, int event, int level)
{
        struct _thermal_state *state;
        unsigned int this_cpu = smp_processor_id();
        bool old_event;
        u64 now;
        struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);
        now = get_jiffies_64();
        if (level == CORE_LEVEL) {
                if (event == THERMAL_THROTTLING_EVENT)
                        state = &pstate->core_throttle;
                else if (event == POWER_LIMIT_EVENT)
                        state = &pstate->core_power_limit;
                else
                        return 0;
        } else if (level == PACKAGE_LEVEL) {
                if (event == THERMAL_THROTTLING_EVENT)
                        state = &pstate->package_throttle;
                else if (event == POWER_LIMIT_EVENT)
                        state = &pstate->package_power_limit;
                else
                        return 0;
        } else
                return 0;
        old_event = state->new_event;
        state->new_event = new_event;

        if (new_event)
                state->count++;

        if (time_before64(now, state->next_check) &&
                        state->count != state->last_count)
                return 0;

        state->next_check = now + CHECK_INTERVAL;
        state->last_count = state->count;
        /* if we just entered the thermal event */
        if (new_event) {
                if (event == THERMAL_THROTTLING_EVENT)
                        printk(KERN_CRIT "CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
                               this_cpu,
                               level == CORE_LEVEL ? "Core" : "Package",
                               state->count);
                return 1;
        }
        if (old_event) {
                if (event == THERMAL_THROTTLING_EVENT)
                        printk(KERN_INFO "CPU%d: %s temperature/speed normal\n",
                               this_cpu,
                               level == CORE_LEVEL ? "Core" : "Package");
                return 1;
        }

        return 0;
}
static int thresh_event_valid(int level, int event)
{
        struct _thermal_state *state;
        unsigned int this_cpu = smp_processor_id();
        struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);
        u64 now = get_jiffies_64();

        if (level == PACKAGE_LEVEL)
                state = (event == 0) ? &pstate->pkg_thresh0 :
                                        &pstate->pkg_thresh1;
        else
                state = (event == 0) ? &pstate->core_thresh0 :
                                        &pstate->core_thresh1;

        if (time_before64(now, state->next_check))
                return 0;

        state->next_check = now + CHECK_INTERVAL;

        return 1;
}
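/*
 * Power Limit Notification (PLN) interrupts are left disabled by default;
 * booting with "int_pln_enable" on the kernel command line turns them on.
 */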
static bool int_pln_enable;
static int __init int_pln_enable_setup(char *s)
{
        int_pln_enable = true;

        return 1;
}
__setup("int_pln_enable", int_pln_enable_setup);
#ifdef CONFIG_SYSFS
/* Add/Remove thermal_throttle interface for CPU device: */
static __cpuinit int thermal_throttle_add_dev(struct device *dev,
                                              unsigned int cpu)
{
        int err;
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        err = sysfs_create_group(&dev->kobj, &thermal_attr_group);
        if (err)
                return err;

        if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable)
                err = sysfs_add_file_to_group(&dev->kobj,
                                &dev_attr_core_power_limit_count.attr,
                                thermal_attr_group.name);
        if (cpu_has(c, X86_FEATURE_PTS)) {
                err = sysfs_add_file_to_group(&dev->kobj,
                                &dev_attr_package_throttle_count.attr,
                                thermal_attr_group.name);
                if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable)
                        err = sysfs_add_file_to_group(&dev->kobj,
                                &dev_attr_package_power_limit_count.attr,
                                thermal_attr_group.name);
        }

        return err;
}
static __cpuinit void thermal_throttle_remove_dev(struct device *dev)
{
        sysfs_remove_group(&dev->kobj, &thermal_attr_group);
}
/* Mutex protecting device creation against CPU hotplug: */
static DEFINE_MUTEX(therm_cpu_lock);
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static __cpuinit int
thermal_throttle_cpu_callback(struct notifier_block *nfb,
                              unsigned long action,
                              void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct device *dev;
        int err = 0;

        dev = get_cpu_device(cpu);

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                mutex_lock(&therm_cpu_lock);
                err = thermal_throttle_add_dev(dev, cpu);
                mutex_unlock(&therm_cpu_lock);
                WARN_ON(err);
                break;
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                mutex_lock(&therm_cpu_lock);
                thermal_throttle_remove_dev(dev);
                mutex_unlock(&therm_cpu_lock);
                break;
        }
        return notifier_from_errno(err);
}
static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
{
        .notifier_call = thermal_throttle_cpu_callback,
};
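/*
 * Register the hotplug notifier and create the sysfs files for CPUs that
 * are already online at init time.
 */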
static __init int thermal_throttle_init_device(void)
{
        unsigned int cpu = 0;
        int err;

        if (!atomic_read(&therm_throt_en))
                return 0;

        register_hotcpu_notifier(&thermal_throttle_cpu_notifier);

#ifdef CONFIG_HOTPLUG_CPU
        mutex_lock(&therm_cpu_lock);
#endif
        /* connect live CPUs to sysfs */
        for_each_online_cpu(cpu) {
                err = thermal_throttle_add_dev(get_cpu_device(cpu), cpu);
                WARN_ON(err);
        }
#ifdef CONFIG_HOTPLUG_CPU
        mutex_unlock(&therm_cpu_lock);
#endif

        return 0;
}
device_initcall(thermal_throttle_init_device);

#endif /* CONFIG_SYSFS */
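/*
 * Hand package threshold events to the registered platform callback.  If
 * the callback implements its own rate control, the per-threshold
 * CHECK_INTERVAL limiting done by thresh_event_valid() is skipped.
 */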
static void notify_package_thresholds(__u64 msr_val)
{
        bool notify_thres_0 = false;
        bool notify_thres_1 = false;

        if (!platform_thermal_package_notify)
                return;

        /* lower threshold check */
        if (msr_val & THERM_LOG_THRESHOLD0)
                notify_thres_0 = true;
        /* higher threshold check */
        if (msr_val & THERM_LOG_THRESHOLD1)
                notify_thres_1 = true;

        if (!notify_thres_0 && !notify_thres_1)
                return;

        if (platform_thermal_package_rate_control &&
                platform_thermal_package_rate_control()) {
                /* Rate control is implemented in callback */
                platform_thermal_package_notify(msr_val);
                return;
        }

        /* lower threshold reached */
        if (notify_thres_0 && thresh_event_valid(PACKAGE_LEVEL, 0))
                platform_thermal_package_notify(msr_val);
        /* higher threshold reached */
        if (notify_thres_1 && thresh_event_valid(PACKAGE_LEVEL, 1))
                platform_thermal_package_notify(msr_val);
}
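/* Same as above, but for the per-core thermal thresholds. */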
static void notify_thresholds(__u64 msr_val)
{
        /* check whether the interrupt handler is defined;
         * otherwise simply return
         */
        if (!platform_thermal_notify)
                return;

        /* lower threshold reached */
        if ((msr_val & THERM_LOG_THRESHOLD0) &&
                        thresh_event_valid(CORE_LEVEL, 0))
                platform_thermal_notify(msr_val);
        /* higher threshold reached */
        if ((msr_val & THERM_LOG_THRESHOLD1) &&
                        thresh_event_valid(CORE_LEVEL, 1))
                platform_thermal_notify(msr_val);
}
/* Thermal transition interrupt handler */
static void intel_thermal_interrupt(void)
{
        __u64 msr_val;

        rdmsrl(MSR_IA32_THERM_STATUS, msr_val);

        /* Check for violation of core thermal thresholds */
        notify_thresholds(msr_val);

        if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT,
                                THERMAL_THROTTLING_EVENT,
                                CORE_LEVEL) != 0)
                mce_log_therm_throt_event(msr_val);

        if (this_cpu_has(X86_FEATURE_PLN) && int_pln_enable)
                therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT,
                                    POWER_LIMIT_EVENT,
                                    CORE_LEVEL);

        if (this_cpu_has(X86_FEATURE_PTS)) {
                rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
                /* check violations of package thermal thresholds */
                notify_package_thresholds(msr_val);
                therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT,
                                    THERMAL_THROTTLING_EVENT,
                                    PACKAGE_LEVEL);
                if (this_cpu_has(X86_FEATURE_PLN) && int_pln_enable)
                        therm_throt_process(msr_val &
                                        PACKAGE_THERM_STATUS_POWER_LIMIT,
                                        POWER_LIMIT_EVENT,
                                        PACKAGE_LEVEL);
        }
}
static void unexpected_thermal_interrupt(void)
{
        printk(KERN_ERR "CPU%d: Unexpected LVT thermal interrupt!\n",
               smp_processor_id());
}

static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;
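/*
 * smp_thermal_vector starts out pointing at unexpected_thermal_interrupt
 * and is switched to intel_thermal_interrupt by intel_init_thermal() once
 * thermal monitoring has been set up.
 */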
static inline void __smp_thermal_interrupt(void)
{
        inc_irq_stat(irq_thermal_count);
        smp_thermal_vector();
}
asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
{
        entering_irq();
        __smp_thermal_interrupt();
        exiting_ack_irq();
}

asmlinkage void smp_trace_thermal_interrupt(struct pt_regs *regs)
{
        entering_irq();
        trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
        __smp_thermal_interrupt();
        trace_thermal_apic_exit(THERMAL_APIC_VECTOR);
        exiting_ack_irq();
}
/* Thermal monitoring depends on APIC, ACPI and clock modulation */
static int intel_thermal_supported(struct cpuinfo_x86 *c)
{
        if (!cpu_has_apic)
                return 0;
        if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
                return 0;
        return 1;
}
void __init mcheck_intel_therm_init(void)
{
        /*
         * This function is only called on the boot CPU. Save the init
         * thermal LVT value on the BSP and use it later to restore the
         * thermal LVT entry that BIOS programmed on the APs.
         */
        if (intel_thermal_supported(&boot_cpu_data))
                lvtthmr_init = apic_read(APIC_LVTTHMR);
}
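/*
 * Per-CPU thermal setup: program the thermal LVT entry, enable the high/low
 * threshold interrupts (and PLN when requested), point smp_thermal_vector at
 * the Intel handler, then enable TM1 and unmask the vector.
 */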
void intel_init_thermal(struct cpuinfo_x86 *c)
{
        unsigned int cpu = smp_processor_id();
        int tm2 = 0;
        u32 l, h;

        if (!intel_thermal_supported(c))
                return;
        /*
         * First check if it's enabled already, in which case there might
         * be some SMM goo which handles it, so we can't even put a handler
         * since it might be delivered via SMI already:
         */
        rdmsr(MSR_IA32_MISC_ENABLE, l, h);

        h = lvtthmr_init;
        /*
         * The initial value of thermal LVT entries on all APs always reads
         * 0x10000 because APs are woken up by BSP issuing INIT-SIPI-SIPI
         * sequence to them and LVT registers are reset to 0s except for
         * the mask bits which are set to 1s when APs receive INIT IPI.
         * If the BIOS takes over the thermal interrupt and sets its
         * delivery mode to SMI (not fixed), restore on this AP the value
         * saved from the BSP, since the BIOS programs the same value for
         * all threads/cores.
         */
        if ((h & APIC_DM_FIXED_MASK) != APIC_DM_FIXED)
                apic_write(APIC_LVTTHMR, lvtthmr_init);
        if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
                printk(KERN_DEBUG
                       "CPU%d: Thermal monitoring handled by SMI\n", cpu);
                return;
        }
        /* Check whether a vector already exists */
        if (h & APIC_VECTOR_MASK) {
                printk(KERN_DEBUG
                       "CPU%d: Thermal LVT vector (%#x) already installed\n",
                       cpu, (h & APIC_VECTOR_MASK));
                return;
        }
        /* early Pentium M models use different method for enabling TM2 */
        if (cpu_has(c, X86_FEATURE_TM2)) {
                if (c->x86 == 6 && (c->x86_model == 9 || c->x86_model == 13)) {
                        rdmsr(MSR_THERM2_CTL, l, h);
                        if (l & MSR_THERM2_CTL_TM_SELECT)
                                tm2 = 1;
                } else if (l & MSR_IA32_MISC_ENABLE_TM2)
                        tm2 = 1;
        }
        /* We'll mask the thermal vector in the lapic till we're ready: */
        h = THERMAL_APIC_VECTOR | APIC_DM_FIXED | APIC_LVT_MASKED;
        apic_write(APIC_LVTTHMR, h);
        rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
        if (cpu_has(c, X86_FEATURE_PLN) && !int_pln_enable)
                wrmsr(MSR_IA32_THERM_INTERRUPT,
                      (l | (THERM_INT_LOW_ENABLE
                        | THERM_INT_HIGH_ENABLE)) & ~THERM_INT_PLN_ENABLE, h);
        else if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable)
                wrmsr(MSR_IA32_THERM_INTERRUPT,
                      l | (THERM_INT_LOW_ENABLE
                        | THERM_INT_HIGH_ENABLE | THERM_INT_PLN_ENABLE), h);
        else
                wrmsr(MSR_IA32_THERM_INTERRUPT,
                      l | (THERM_INT_LOW_ENABLE | THERM_INT_HIGH_ENABLE), h);
        if (cpu_has(c, X86_FEATURE_PTS)) {
                rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
                if (cpu_has(c, X86_FEATURE_PLN) && !int_pln_enable)
                        wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
                              (l | (PACKAGE_THERM_INT_LOW_ENABLE
                                | PACKAGE_THERM_INT_HIGH_ENABLE))
                                & ~PACKAGE_THERM_INT_PLN_ENABLE, h);
                else if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable)
                        wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
                              l | (PACKAGE_THERM_INT_LOW_ENABLE
                                | PACKAGE_THERM_INT_HIGH_ENABLE
                                | PACKAGE_THERM_INT_PLN_ENABLE), h);
                else
                        wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
                              l | (PACKAGE_THERM_INT_LOW_ENABLE
                                | PACKAGE_THERM_INT_HIGH_ENABLE), h);
        }
        smp_thermal_vector = intel_thermal_interrupt;

        rdmsr(MSR_IA32_MISC_ENABLE, l, h);
        wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);
        /* Unmask the thermal vector: */
        l = apic_read(APIC_LVTTHMR);
        apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);

        printk_once(KERN_INFO "CPU0: Thermal monitoring enabled (%s)\n",
                    tm2 ? "TM2" : "TM1");
        /* enable thermal throttle processing */
        atomic_set(&therm_throt_en, 1);
}