perf_counters: account NMI interrupts
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 9376771f757b26167c8965cf54955802b84d2dc8..9901e46998d1a59d006eff17111046e0e539ca52 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -33,6 +33,8 @@ static int nr_counters_fixed __read_mostly;
 struct cpu_hw_counters {
        struct perf_counter     *counters[X86_PMC_IDX_MAX];
        unsigned long           used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+       unsigned long           interrupts;
+       u64                     global_enable;
 };
 
 /*
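Two new per-CPU fields hold the throttle state: interrupts counts PMU
interrupts taken on this CPU since the last timer tick, and global_enable
caches the value of MSR_CORE_PERF_GLOBAL_CTRL at NMI time, so a throttled
PMU can later be re-enabled with exactly the enable bits it had.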
@@ -467,6 +469,11 @@ perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
        }
 }
 
+/*
+ * Maximum interrupt frequency of 100 kHz per CPU
+ */
+#define PERFMON_MAX_INTERRUPTS (100000/HZ)
+
 /*
  * This handler is triggered by the local APIC, so the APIC IRQ handling
  * rules apply:
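The budget arithmetic: cpuc->interrupts is zeroed once per timer tick by
perf_counter_unthrottle() below, so a per-tick allowance of 100000/HZ
interrupts is HZ-independent when expressed as a rate: 100000/HZ interrupts
per tick times HZ ticks per second = 100000 interrupts per second, i.e.
100 kHz per CPU. With HZ=1000 that is 100 NMIs per tick; with HZ=250, 400
per tick.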
@@ -474,22 +481,21 @@ perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
 static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
 {
        int bit, cpu = smp_processor_id();
-       u64 ack, status, saved_global;
-       struct cpu_hw_counters *cpuc;
+       u64 ack, status;
+       struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
 
-       rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, saved_global);
+       rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
 
        /* Disable counters globally */
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
        ack_APIC_irq();
 
-       cpuc = &per_cpu(cpu_hw_counters, cpu);
-
        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
        if (!status)
                goto out;
 
 again:
+       inc_irq_stat(apic_perf_irqs);
        ack = status;
        for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
                struct perf_counter *counter = cpuc->counters[bit];
@@ -533,15 +539,38 @@ again:
                goto again;
 out:
        /*
-        * Restore - do not reenable when global enable is off:
+        * Restore - do not reenable when global enable is off or throttled:
         */
-       wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, saved_global);
+       if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS)
+               wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
+}
+
+void perf_counter_unthrottle(void)
+{
+       struct cpu_hw_counters *cpuc;
+       u64 global_enable;
+
+       if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
+               return;
+
+       if (unlikely(!perf_counters_initialized))
+               return;
+
+       cpuc = &per_cpu(cpu_hw_counters, smp_processor_id());
+       if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) {
+               if (printk_ratelimit())
+                       printk(KERN_WARNING "PERFMON: max interrupts exceeded!\n");
+               wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
+       }
+       rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, global_enable);
+       if (unlikely(cpuc->global_enable && !global_enable))
+               wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
+       cpuc->interrupts = 0;
 }
 
 void smp_perf_counter_interrupt(struct pt_regs *regs)
 {
        irq_enter();
-       inc_irq_stat(apic_perf_irqs);
        apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
        __smp_perf_counter_interrupt(regs, 0);
 
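Two things happen above. First, inc_irq_stat(apic_perf_irqs) moves out of
smp_perf_counter_interrupt() -- which only runs for regular APIC-delivered
interrupts -- into the shared __smp_perf_counter_interrupt() path, so
NMI-delivered PMU interrupts are now accounted too; that is the "account
NMI interrupts" of the title. Second, the handler charges every interrupt
against a per-tick budget and refuses to re-enable the PMU once the budget
is spent; perf_counter_unthrottle(), whose call site is outside this file
(presumably once per timer tick, since it resets the count), lifts the
throttle again. The following stand-alone user-space model of that cycle is
a sketch only: HZ and all names below are illustrative assumptions, not
kernel API.

#include <stdio.h>

#define HZ                      1000            /* assumption: common config */
#define PERFMON_MAX_INTERRUPTS  (100000/HZ)     /* 100 per tick */

struct cpu_hw_counters {
        unsigned long           interrupts;
        unsigned long long      global_enable;
};

/* stands in for the MSR_CORE_PERF_GLOBAL_CTRL register */
static unsigned long long global_ctrl = 0x3;    /* two counters enabled */

/* models the tail of __smp_perf_counter_interrupt() */
static void pmu_interrupt(struct cpu_hw_counters *cpuc)
{
        cpuc->global_enable = global_ctrl;      /* save enable bits */
        global_ctrl = 0;                        /* disable counters globally */
        /* ... overflow servicing elided ... */
        if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS)
                global_ctrl = cpuc->global_enable;      /* restore */
        /* else: PMU stays disabled until the next tick */
}

/* models perf_counter_unthrottle(); the race-check rdmsrl is elided */
static void tick(struct cpu_hw_counters *cpuc)
{
        if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS)
                global_ctrl = cpuc->global_enable;      /* lift the throttle */
        cpuc->interrupts = 0;                   /* fresh per-tick budget */
}

int main(void)
{
        struct cpu_hw_counters cpuc = { 0, 0 };
        int i;

        for (i = 0; i < 150; i++)       /* interrupt storm within one tick */
                if (global_ctrl)        /* a disabled PMU raises no NMIs */
                        pmu_interrupt(&cpuc);
        printf("after storm: ctrl=%#llx, interrupts=%lu\n",
               global_ctrl, cpuc.interrupts);   /* ctrl 0: throttled */

        tick(&cpuc);
        printf("after tick:  ctrl=%#llx, interrupts=%lu\n",
               global_ctrl, cpuc.interrupts);   /* ctrl 0x3: running again */
        return 0;
}

Running it shows the PMU parked after 100 NMIs and re-enabled by the tick.
The extra rdmsrl() check in the real perf_counter_unthrottle() appears to
guard against an NMI disabling the counters between the throttle check and
the reset; the model above leaves that race out.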
@@ -576,7 +605,7 @@ void perf_counter_notify(struct pt_regs *regs)
        local_irq_restore(flags);
 }
 
-void __cpuinit perf_counters_lapic_init(int nmi)
+void perf_counters_lapic_init(int nmi)
 {
        u32 apic_val;
 
@@ -614,7 +643,9 @@ perf_counter_nmi_handler(struct notifier_block *self,
 }
 
 static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
-       .notifier_call          = perf_counter_nmi_handler
+       .notifier_call          = perf_counter_nmi_handler,
+       .next                   = NULL,
+       .priority               = 1
 };
 
 void __init init_hw_perf_counters(void)
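Notifier chains keep their entries sorted by descending .priority, so
bumping this block to priority 1 makes the perf counter handler run before
default priority-0 die notifiers such as the NMI watchdog. (Spelling out
.next = NULL is redundant for a static object, but harmless; the priority
is the functional change.) A minimal sketch of the registration pattern of
that era -- the handler name and init wiring here are hypothetical, not
taken from this patch:

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>

/* hypothetical handler: claim PMU NMIs before lower-priority notifiers */
static int my_nmi_handler(struct notifier_block *self,
                          unsigned long cmd, void *data)
{
        if (cmd != DIE_NMI)
                return NOTIFY_DONE;     /* not ours, keep walking the chain */
        /* ... poll the PMU here ... */
        return NOTIFY_STOP;             /* consume the NMI */
}

static struct notifier_block my_nmi_notifier = {
        .notifier_call  = my_nmi_handler,
        .priority       = 1,            /* sorts ahead of priority-0 entries */
};

static int __init my_init(void)
{
        return register_die_notifier(&my_nmi_notifier);
}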