struct cpu_hw_counters {
struct perf_counter *counters[X86_PMC_IDX_MAX];
unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
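+ /* throttling state: PMU interrupts taken this tick, saved GLOBAL_CTRL mask */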
+ unsigned long interrupts;
+ u64 global_enable;
};
/*
return 0;
}
-void hw_perf_enable_all(void)
-{
- if (unlikely(!perf_counters_initialized))
- return;
-
- wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask);
-}
-
u64 hw_perf_save_disable(void)
{
u64 ctrl;
__pmc_generic_disable(struct perf_counter *counter,
struct hw_perf_counter *hwc, unsigned int idx)
{
- int err;
-
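+ /* fixed counters are driven via MSR_ARCH_PERFMON_FIXED_CTR_CTRL, not per-counter eventsel MSRs */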
if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
- return __pmc_fixed_disable(counter, hwc, idx);
-
- err = wrmsr_safe(hwc->config_base + idx, hwc->config, 0);
+ __pmc_fixed_disable(counter, hwc, idx);
+ else
+ wrmsr_safe(hwc->config_base + idx, hwc->config, 0);
}
static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);
struct hw_perf_counter *hwc, int idx)
{
if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
- return __pmc_fixed_enable(counter, hwc, idx);
-
- wrmsr(hwc->config_base + idx,
- hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0);
+ __pmc_fixed_enable(counter, hwc, idx);
+ else
+ wrmsr(hwc->config_base + idx,
+ hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0);
}
static int
}
}
+/*
+ * Maximum interrupt frequency of 100KHz per CPU
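+ * (100000/HZ interrupts per tick; the per-cpu count is reset from
+ * the timer tick via perf_counter_unthrottle())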
+ */
+#define PERFMON_MAX_INTERRUPTS (100000/HZ)
+
/*
* This handler is triggered by the local APIC, so the APIC IRQ handling
* rules apply:
static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
{
int bit, cpu = smp_processor_id();
- u64 ack, status, saved_global;
- struct cpu_hw_counters *cpuc;
+ u64 ack, status;
+ struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
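+ /* save GLOBAL_CTRL per-cpu so perf_counter_unthrottle() can restore it */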
- rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, saved_global);
+ rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
/* Disable counters globally */
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
ack_APIC_irq();
- cpuc = &per_cpu(cpu_hw_counters, cpu);
-
rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
if (!status)
goto out;
again:
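+ /* count PMU interrupts here so the NMI path is accounted as well */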
+ inc_irq_stat(apic_perf_irqs);
ack = status;
for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
struct perf_counter *counter = cpuc->counters[bit];
goto again;
out:
/*
- * Restore - do not reenable when global enable is off:
+ * Restore - do not reenable when global enable is off or throttled:
*/
- wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, saved_global);
+ if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS)
+ wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
+}
+
+void perf_counter_unthrottle(void)
+{
+ struct cpu_hw_counters *cpuc;
+ u64 global_enable;
+
+ if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
+ return;
+
+ if (unlikely(!perf_counters_initialized))
+ return;
+
+ cpuc = &per_cpu(cpu_hw_counters, smp_processor_id());
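+ /* the NMI handler throttled the PMU this tick: warn (ratelimited) and re-enable */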
+ if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) {
+ if (printk_ratelimit())
+ printk(KERN_WARNING "PERFMON: max interrupts exceeded!\n");
+ wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
+ }
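+ /* if a racing NMI left the counters disabled while they should be on, restore the saved mask */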
+ rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, global_enable);
+ if (unlikely(cpuc->global_enable && !global_enable))
+ wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
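+ /* open a fresh interrupt budget for the next tick */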
+ cpuc->interrupts = 0;
}
void smp_perf_counter_interrupt(struct pt_regs *regs)
{
irq_enter();
- inc_irq_stat(apic_perf_irqs);
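+ /* the APIC masks the LVTPC entry on delivery; re-arm it */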
apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
__smp_perf_counter_interrupt(regs, 0);
irq_exit();
}
-void __cpuinit perf_counters_lapic_init(int nmi)
+void perf_counters_lapic_init(int nmi)
{
u32 apic_val;
}
static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
- .notifier_call = perf_counter_nmi_handler
+ .notifier_call = perf_counter_nmi_handler,
+ .next = NULL,
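+ /* run ahead of default-priority (0) NMI users */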
+ .priority = 1
};
void __init init_hw_perf_counters(void)