perfcounters: enable lowlevel pmc code to schedule counters
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 30e7ebf78275975b61b6c9ad0a9c052beb750aff..74090a393a7ce637fb3c73b1b9971db034ae3de9 100644
@@ -16,7 +16,7 @@
 #include <linux/kdebug.h>
 #include <linux/sched.h>
 
-#include <asm/intel_arch_perfmon.h>
+#include <asm/perf_counter.h>
 #include <asm/apic.h>
 
 static bool perf_counters_initialized __read_mostly;
@@ -24,16 +24,14 @@ static bool perf_counters_initialized __read_mostly;
 /*
  * Number of (generic) HW counters:
  */
-static int nr_hw_counters __read_mostly;
-static u32 perf_counter_mask __read_mostly;
+static int nr_counters_generic __read_mostly;
+static u64 perf_counter_mask __read_mostly;
 
-/* No support for fixed function counters yet */
-
-#define MAX_HW_COUNTERS                8
+static int nr_counters_fixed __read_mostly;
 
 struct cpu_hw_counters {
-       struct perf_counter     *counters[MAX_HW_COUNTERS];
-       unsigned long           used[BITS_TO_LONGS(MAX_HW_COUNTERS)];
+       struct perf_counter     *counters[X86_PMC_IDX_MAX];
+       unsigned long           used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
 };
 
 /*
@@ -41,7 +39,7 @@ struct cpu_hw_counters {
  */
 static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);
 
-const int intel_perfmon_event_map[] =
+static const int intel_perfmon_event_map[] =
 {
   [PERF_COUNT_CYCLES]                  = 0x003c,
   [PERF_COUNT_INSTRUCTIONS]            = 0x00c0,
@@ -51,15 +49,55 @@ const int intel_perfmon_event_map[] =
   [PERF_COUNT_BRANCH_MISSES]           = 0x00c5,
 };
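
Each map entry packs the architectural event encoding into the low 16
bits of the EVENTSEL format: bits 0-7 are the event select, bits 8-15
the unit mask. A small decode sketch (hypothetical helpers, not part
of the patch):

    #include <stdint.h>

    /* Decode the (umask << 8) | event layout used by the map above. */
    static inline uint8_t evsel_event(uint16_t config) { return config & 0xff; }
    static inline uint8_t evsel_umask(uint16_t config) { return config >> 8; }

    /* e.g. PERF_COUNT_CYCLES = 0x003c: event select 0x3C, umask 0x00,
     * the architectural UnHalted Core Cycles event. */
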
 
-const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map);
+static const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map);
+
+/*
+ * Propagate counter elapsed time into the generic counter.
+ * Can only be executed on the CPU where the counter is active.
+ * The delta of events processed is folded into the generic counter.
+ */
+static void
+x86_perf_counter_update(struct perf_counter *counter,
+                       struct hw_perf_counter *hwc, int idx)
+{
+       u64 prev_raw_count, new_raw_count, delta;
+
+       /*
+        * Careful: an NMI might modify the previous counter value.
+        *
+        * Our tactic to handle this is to first atomically read and
+        * exchange a new raw count - then add that new-prev delta
+        * count to the generic counter atomically:
+        */
+again:
+       prev_raw_count = atomic64_read(&hwc->prev_count);
+       rdmsrl(hwc->counter_base + idx, new_raw_count);
+
+       if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
+                                       new_raw_count) != prev_raw_count)
+               goto again;
+
+       /*
+        * Now we have the new raw value and have updated the prev
+        * timestamp already. We can now calculate the elapsed delta
+        * (counter-)time and add that to the generic counter.
+        *
+        * Careful: not all hardware sign-extends above the physical width
+        * of the counter, so we handle that by clipping the delta to 32 bits:
+        */
+       delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count);
+
+       atomic64_add(delta, &counter->count);
+       atomic64_sub(delta, &hwc->period_left);
+}
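
The tactic above is a lock-free read-then-compare-and-exchange retry
loop. A minimal user-space sketch of the same pattern, assuming C11
atomics and a hypothetical read_raw_counter() standing in for the
rdmsrl():

    #include <stdatomic.h>
    #include <stdint.h>

    extern uint64_t read_raw_counter(void);    /* hypothetical MSR read */

    static void counter_update(_Atomic uint64_t *prev_count,
                               _Atomic uint64_t *count)
    {
            uint64_t prev, new;

            do {
                    prev = atomic_load(prev_count);
                    new  = read_raw_counter();
                    /* retry if an NMI-style update changed prev_count */
            } while (!atomic_compare_exchange_strong(prev_count, &prev, new));

            /* clip to 32 bits: not all hw sign-extends the raw count */
            atomic_fetch_add(count,
                    (uint64_t)(uint32_t)((int32_t)new - (int32_t)prev));
    }
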
 
 /*
  * Setup the hardware configuration for a given hw_event_type
  */
-int hw_perf_counter_init(struct perf_counter *counter)
+static int __hw_perf_counter_init(struct perf_counter *counter)
 {
+       struct perf_counter_hw_event *hw_event = &counter->hw_event;
        struct hw_perf_counter *hwc = &counter->hw;
-       u32 hw_event_type = counter->event.hw_event_type;
 
        if (unlikely(!perf_counters_initialized))
                return -EINVAL;
@@ -77,37 +115,36 @@ int hw_perf_counter_init(struct perf_counter *counter)
        hwc->nmi = 0;
        if (capable(CAP_SYS_ADMIN)) {
                hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
-               if (hw_event_type & PERF_COUNT_NMI)
+               if (hw_event->nmi)
                        hwc->nmi = 1;
        }
 
-       hwc->config_base = MSR_ARCH_PERFMON_EVENTSEL0;
-       hwc->counter_base = MSR_ARCH_PERFMON_PERFCTR0;
+       hwc->config_base        = MSR_ARCH_PERFMON_EVENTSEL0;
+       hwc->counter_base       = MSR_ARCH_PERFMON_PERFCTR0;
 
-       hwc->irq_period = counter->event.hw_event_period;
+       hwc->irq_period         = hw_event->irq_period;
        /*
         * Intel PMCs cannot be accessed sanely above 32 bit width,
         * so we install an artificial 1<<31 period regardless of
         * the generic counter period:
         */
-       if (!hwc->irq_period)
+       if ((s64)hwc->irq_period <= 0 || hwc->irq_period > 0x7FFFFFFF)
                hwc->irq_period = 0x7FFFFFFF;
 
-       hwc->next_count = -((s32) hwc->irq_period);
+       atomic64_set(&hwc->period_left, hwc->irq_period);
 
        /*
         * Raw event types provide the config directly in the event structure
         */
-       hw_event_type &= ~PERF_COUNT_NMI;
-       if (hw_event_type == PERF_COUNT_RAW) {
-               hwc->config |= counter->event.hw_raw_ctrl;
+       if (hw_event->raw) {
+               hwc->config |= hw_event->type;
        } else {
-               if (hw_event_type >= max_intel_perfmon_events)
+               if (hw_event->type >= max_intel_perfmon_events)
                        return -EINVAL;
                /*
                 * The generic map:
                 */
-               hwc->config |= intel_perfmon_event_map[hw_event_type];
+               hwc->config |= intel_perfmon_event_map[hw_event->type];
        }
        counter->wakeup_pending = 0;
 
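
The period check replaces the old !hwc->irq_period test and now also
rejects negative and over-wide requests. As a standalone rule (a
sketch mirroring the two added lines above):

    #include <stdint.h>

    /* Any requested period outside (0, 2^31 - 1] falls back to the
     * artificial 0x7FFFFFFF default, since the PMC is only written
     * 32 bits wide. */
    static uint64_t clamp_irq_period(int64_t requested)
    {
            if (requested <= 0 || requested > 0x7FFFFFFF)
                    return 0x7FFFFFFF;
            return (uint64_t)requested;
    }
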
@@ -116,142 +153,130 @@ int hw_perf_counter_init(struct perf_counter *counter)
 
 void hw_perf_enable_all(void)
 {
-       wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask, 0);
-}
+       if (unlikely(!perf_counters_initialized))
+               return;
 
-void hw_perf_restore_ctrl(u64 ctrl)
-{
-       wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, ctrl, 0);
+       wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask);
 }
-EXPORT_SYMBOL_GPL(hw_perf_restore_ctrl);
 
-u64 hw_perf_disable_all(void)
+u64 hw_perf_save_disable(void)
 {
        u64 ctrl;
 
+       if (unlikely(!perf_counters_initialized))
+               return 0;
+
        rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
-       wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
+       wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+
        return ctrl;
 }
-EXPORT_SYMBOL_GPL(hw_perf_disable_all);
+EXPORT_SYMBOL_GPL(hw_perf_save_disable);
 
-static inline void
-__hw_perf_counter_disable(struct hw_perf_counter *hwc, unsigned int idx)
+void hw_perf_restore(u64 ctrl)
 {
-       wrmsr(hwc->config_base + idx, hwc->config, 0);
-}
+       if (unlikely(!perf_counters_initialized))
+               return;
 
-static DEFINE_PER_CPU(u64, prev_next_count[MAX_HW_COUNTERS]);
+       wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
+}
+EXPORT_SYMBOL_GPL(hw_perf_restore);
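
The renamed pair now reads as what it is: a save/modify/restore
bracket around the global-enable MSR. An illustrative caller (assumed,
not part of this patch) would wrap a critical section like this:

    /* Stop all counters, run fn() race-free, restore previous state. */
    static void with_counters_stopped(void (*fn)(void))
    {
            u64 ctrl = hw_perf_save_disable();

            fn();                   /* counters are globally frozen here */

            hw_perf_restore(ctrl);
    }
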
 
-static void __hw_perf_counter_set_period(struct hw_perf_counter *hwc, int idx)
+static inline void
+__pmc_generic_disable(struct perf_counter *counter,
+                          struct hw_perf_counter *hwc, unsigned int idx)
 {
-       per_cpu(prev_next_count[idx], smp_processor_id()) = hwc->next_count;
+       int err;
 
-       wrmsr(hwc->counter_base + idx, hwc->next_count, 0);
+       err = wrmsr_safe(hwc->config_base + idx, hwc->config, 0);
 }
 
-static void __hw_perf_counter_enable(struct hw_perf_counter *hwc, int idx)
-{
-       wrmsr(hwc->config_base + idx,
-             hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0);
-}
+static DEFINE_PER_CPU(u64, prev_left[X86_PMC_MAX_GENERIC]);
 
-void hw_perf_counter_enable(struct perf_counter *counter)
+/*
+ * Set the next IRQ period, based on the hwc->period_left value.
+ * To be called with the counter disabled in hw:
+ */
+static void
+__hw_perf_counter_set_period(struct perf_counter *counter,
+                            struct hw_perf_counter *hwc, int idx)
 {
-       struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
-       struct hw_perf_counter *hwc = &counter->hw;
-       int idx = hwc->idx;
+       s32 left = atomic64_read(&hwc->period_left);
+       s32 period = hwc->irq_period;
 
-       /* Try to get the previous counter again */
-       if (test_and_set_bit(idx, cpuc->used)) {
-               idx = find_first_zero_bit(cpuc->used, nr_hw_counters);
-               set_bit(idx, cpuc->used);
-               hwc->idx = idx;
+       /*
+        * If we are way outside a reasoable range then just skip forward:
+        */
+       if (unlikely(left <= -period)) {
+               left = period;
+               atomic64_set(&hwc->period_left, left);
        }
 
-       perf_counters_lapic_init(hwc->nmi);
+       if (unlikely(left <= 0)) {
+               left += period;
+               atomic64_set(&hwc->period_left, left);
+       }
 
-       __hw_perf_counter_disable(hwc, idx);
+       per_cpu(prev_left[idx], smp_processor_id()) = left;
 
-       cpuc->counters[idx] = counter;
+       /*
+        * The hw counter starts counting from this counter offset,
+        * mark it to be able to extract future deltas:
+        */
+       atomic64_set(&hwc->prev_count, (u64)(s64)-left);
 
-       __hw_perf_counter_set_period(hwc, idx);
-       __hw_perf_counter_enable(hwc, idx);
+       wrmsr(hwc->counter_base + idx, -left, 0);
 }
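
A standalone model of the period_left arithmetic above, handy for
checking the two unlikely branches (a sketch; the hardware counter is
programmed with -left, so it overflows after exactly `left` events):

    #include <stdint.h>

    /* Mirrors __hw_perf_counter_set_period()'s computation of `left`. */
    static int32_t next_left(int32_t left, int32_t period)
    {
            if (left <= -period)    /* hopelessly behind: skip forward */
                    left = period;
            if (left <= 0)          /* mid-period overflow: owe the rest */
                    left += period;
            return left;
    }

    /* next_left(-3000, 1000) == 1000
     * next_left( -250, 1000) ==  750
     * next_left(  400, 1000) ==  400 */
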
 
-#ifdef CONFIG_X86_64
-static inline void atomic64_counter_set(struct perf_counter *counter, u64 val)
+static void
+__pmc_generic_enable(struct perf_counter *counter,
+                         struct hw_perf_counter *hwc, int idx)
 {
-       atomic64_set(&counter->count, val);
+       wrmsr(hwc->config_base + idx,
+             hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0);
 }
 
-static inline u64 atomic64_counter_read(struct perf_counter *counter)
+static int fixed_mode_idx(struct hw_perf_counter *hwc)
 {
-       return atomic64_read(&counter->count);
+       return -1;
 }
-#else
+
 /*
- * Todo: add proper atomic64_t support to 32-bit x86:
+ * Find a PMC slot for the freshly enabled / scheduled-in counter:
  */
-static inline void atomic64_counter_set(struct perf_counter *counter, u64 val64)
+static int pmc_generic_enable(struct perf_counter *counter)
 {
-       u32 *val32 = (void *)&val64;
-
-       atomic_set(counter->count32 + 0, *(val32 + 0));
-       atomic_set(counter->count32 + 1, *(val32 + 1));
-}
-
-static inline u64 atomic64_counter_read(struct perf_counter *counter)
-{
-       return atomic_read(counter->count32 + 0) |
-               (u64) atomic_read(counter->count32 + 1) << 32;
-}
-#endif
-
-static void __hw_perf_save_counter(struct perf_counter *counter,
-                                  struct hw_perf_counter *hwc, int idx)
-{
-       s64 raw = -1;
-       s64 delta;
+       struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+       struct hw_perf_counter *hwc = &counter->hw;
+       int idx = hwc->idx;
 
-       /*
-        * Get the raw hw counter value:
-        */
-       rdmsrl(hwc->counter_base + idx, raw);
+       /* Try to get the previous counter again */
+       if (test_and_set_bit(idx, cpuc->used)) {
+               idx = find_first_zero_bit(cpuc->used, nr_counters_generic);
+               if (idx == nr_counters_generic)
+                       return -EAGAIN;
+               set_bit(idx, cpuc->used);
+               hwc->idx = idx;
+       }
 
-       /*
-        * Rebase it to zero (it started counting at -irq_period),
-        * to see the delta since ->prev_count:
-        */
-       delta = (s64)hwc->irq_period + (s64)(s32)raw;
+       perf_counters_lapic_init(hwc->nmi);
 
-       atomic64_counter_set(counter, hwc->prev_count + delta);
+       __pmc_generic_disable(counter, hwc, idx);
 
-       /*
-        * Adjust the ->prev_count offset - if we went beyond
-        * irq_period of units, then we got an IRQ and the counter
-        * was set back to -irq_period:
-        */
-       while (delta >= (s64)hwc->irq_period) {
-               hwc->prev_count += hwc->irq_period;
-               delta -= (s64)hwc->irq_period;
-       }
+       cpuc->counters[idx] = counter;
 
-       /*
-        * Calculate the next raw counter value we'll write into
-        * the counter at the next sched-in time:
-        */
-       delta -= (s64)hwc->irq_period;
+       __hw_perf_counter_set_period(counter, hwc, idx);
+       __pmc_generic_enable(counter, hwc, idx);
 
-       hwc->next_count = (s32)delta;
+       return 0;
 }
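
The allocation policy is try-the-previous-slot-first, then a first-fit
scan of the used bitmap. A simplified single-threaded sketch (the
kernel version relies on atomic test_and_set_bit(); this one does not):

    #define NR_SLOTS 8              /* stand-in for nr_counters_generic */

    /* Returns the allocated slot index, or -1 (-EAGAIN above) if all
     * slots are busy. prev_idx is assumed to be a valid slot number. */
    static int alloc_slot(unsigned long *used, int prev_idx)
    {
            int idx;

            if (!(*used & (1UL << prev_idx))) {
                    *used |= 1UL << prev_idx;
                    return prev_idx;
            }
            for (idx = 0; idx < NR_SLOTS; idx++) {
                    if (!(*used & (1UL << idx))) {
                            *used |= 1UL << idx;
                            return idx;
                    }
            }
            return -1;
    }
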
 
 void perf_counter_print_debug(void)
 {
-       u64 ctrl, status, overflow, pmc_ctrl, pmc_count, next_count;
+       u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left;
        int cpu, idx;
 
-       if (!nr_hw_counters)
+       if (!nr_counters_generic)
                return;
 
        local_irq_disable();
@@ -267,51 +292,38 @@ void perf_counter_print_debug(void)
        printk(KERN_INFO "CPU#%d: status:     %016llx\n", cpu, status);
        printk(KERN_INFO "CPU#%d: overflow:   %016llx\n", cpu, overflow);
 
-       for (idx = 0; idx < nr_hw_counters; idx++) {
+       for (idx = 0; idx < nr_counters_generic; idx++) {
                rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl);
                rdmsrl(MSR_ARCH_PERFMON_PERFCTR0  + idx, pmc_count);
 
-               next_count = per_cpu(prev_next_count[idx], cpu);
+               prev_left = per_cpu(prev_left[idx], cpu);
 
                printk(KERN_INFO "CPU#%d: PMC%d ctrl:  %016llx\n",
                        cpu, idx, pmc_ctrl);
                printk(KERN_INFO "CPU#%d: PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
-               printk(KERN_INFO "CPU#%d: PMC%d next:  %016llx\n",
-                       cpu, idx, next_count);
+               printk(KERN_INFO "CPU#%d: PMC%d left:  %016llx\n",
+                       cpu, idx, prev_left);
        }
        local_irq_enable();
 }
 
-void hw_perf_counter_disable(struct perf_counter *counter)
+static void pmc_generic_disable(struct perf_counter *counter)
 {
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        struct hw_perf_counter *hwc = &counter->hw;
        unsigned int idx = hwc->idx;
 
-       __hw_perf_counter_disable(hwc, idx);
+       __pmc_generic_disable(counter, hwc, idx);
 
        clear_bit(idx, cpuc->used);
        cpuc->counters[idx] = NULL;
-       __hw_perf_save_counter(counter, hwc, idx);
-}
 
-void hw_perf_counter_read(struct perf_counter *counter)
-{
-       struct hw_perf_counter *hwc = &counter->hw;
-       unsigned long addr = hwc->counter_base + hwc->idx;
-       s64 offs, val = -1LL;
-       s32 val32;
-
-       /* Careful: NMI might modify the counter offset */
-       do {
-               offs = hwc->prev_count;
-               rdmsrl(addr, val);
-       } while (offs != hwc->prev_count);
-
-       val32 = (s32) val;
-       val =  (s64)hwc->irq_period + (s64)val32;
-       atomic64_counter_set(counter, hwc->prev_count + val);
+       /*
+        * Drain the remaining delta count out of a counter
+        * that we are disabling:
+        */
+       x86_perf_counter_update(counter, hwc, idx);
 }
 
 static void perf_store_irq_data(struct perf_counter *counter, u64 data)
@@ -329,7 +341,8 @@ static void perf_store_irq_data(struct perf_counter *counter, u64 data)
 }
 
 /*
- * NMI-safe enable method:
+ * Save and restart an expired counter. Called by NMI contexts,
+ * so it has to be careful about preempting normal counter ops:
  */
 static void perf_save_and_restart(struct perf_counter *counter)
 {
@@ -339,41 +352,25 @@ static void perf_save_and_restart(struct perf_counter *counter)
 
        rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl);
 
-       __hw_perf_save_counter(counter, hwc, idx);
-       __hw_perf_counter_set_period(hwc, idx);
+       x86_perf_counter_update(counter, hwc, idx);
+       __hw_perf_counter_set_period(counter, hwc, idx);
 
        if (pmc_ctrl & ARCH_PERFMON_EVENTSEL0_ENABLE)
-               __hw_perf_counter_enable(hwc, idx);
+               __pmc_generic_enable(counter, hwc, idx);
 }
 
 static void
-perf_handle_group(struct perf_counter *leader, u64 *status, u64 *overflown)
+perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
 {
-       struct perf_counter_context *ctx = leader->ctx;
-       struct perf_counter *counter;
-       int bit;
-
-       list_for_each_entry(counter, &ctx->counters, list) {
-               if (counter->record_type != PERF_RECORD_SIMPLE ||
-                   counter == leader)
-                       continue;
+       struct perf_counter *counter, *group_leader = sibling->group_leader;
 
-               if (counter->active) {
-                       /*
-                        * When counter was not in the overflow mask, we have to
-                        * read it from hardware. We read it as well, when it
-                        * has not been read yet and clear the bit in the
-                        * status mask.
-                        */
-                       bit = counter->hw.idx;
-                       if (!test_bit(bit, (unsigned long *) overflown) ||
-                           test_bit(bit, (unsigned long *) status)) {
-                               clear_bit(bit, (unsigned long *) status);
-                               perf_save_and_restart(counter);
-                       }
-               }
-               perf_store_irq_data(leader, counter->event.hw_event_type);
-               perf_store_irq_data(leader, atomic64_counter_read(counter));
+       /*
+        * Store the sibling counters' types and counts (if any):
+        */
+       list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {
+               x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
+               perf_store_irq_data(sibling, counter->hw_event.type);
+               perf_store_irq_data(sibling, atomic64_read(&counter->count));
        }
 }
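
The net effect is that a group overflow appends one (type, count) pair
of u64 values per sibling to the leader's IRQ data. Conceptually (this
struct is illustrative; the patch streams the two words directly):

    #include <stdint.h>

    struct group_entry {
            uint64_t type;          /* sibling->hw_event.type */
            uint64_t count;         /* sibling's 64-bit count at overflow */
    };
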
 
@@ -390,7 +387,7 @@ static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
        rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, saved_global);
 
        /* Disable counters globally */
-       wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
+       wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
        ack_APIC_irq();
 
        cpuc = &per_cpu(cpu_hw_counters, cpu);
@@ -401,7 +398,7 @@ static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
 
 again:
        ack = status;
-       for_each_bit(bit, (unsigned long *) &status, nr_hw_counters) {
+       for_each_bit(bit, (unsigned long *) &status, nr_counters_generic) {
                struct perf_counter *counter = cpuc->counters[bit];
 
                clear_bit(bit, (unsigned long *) &status);
@@ -410,23 +407,19 @@ again:
 
                perf_save_and_restart(counter);
 
-               switch (counter->record_type) {
+               switch (counter->hw_event.record_type) {
                case PERF_RECORD_SIMPLE:
                        continue;
                case PERF_RECORD_IRQ:
                        perf_store_irq_data(counter, instruction_pointer(regs));
                        break;
                case PERF_RECORD_GROUP:
-                       perf_store_irq_data(counter,
-                                           counter->event.hw_event_type);
-                       perf_store_irq_data(counter,
-                                           atomic64_counter_read(counter));
                        perf_handle_group(counter, &status, &ack);
                        break;
                }
                /*
                 * From NMI context we cannot call into the scheduler to
-                * do a task wakeup - but we mark these counters as
+                * do a task wakeup - but we mark these generic counters as
                 * wakeup_pending and initiate a wakeup callback:
                 */
                if (nmi) {
@@ -437,7 +430,7 @@ again:
                }
        }
 
-       wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack, 0);
+       wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
 
        /*
         * Repeat if there is more work to be done:
@@ -449,17 +442,13 @@ out:
        /*
         * Restore - do not reenable when global enable is off:
         */
-       wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, saved_global, 0);
+       wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, saved_global);
 }
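
Condensed, the handler is the usual status/handle/ack/repeat loop. A
sketch with hypothetical read_status(), ack_status() and
handle_overflow() helpers standing in for the GLOBAL_STATUS and
GLOBAL_OVF_CTRL MSR accesses:

    #include <stdint.h>

    extern uint64_t read_status(void);
    extern void ack_status(uint64_t mask);
    extern void handle_overflow(int bit);   /* save-and-restart one PMC */

    static void overflow_loop(int nr_counters)
    {
            uint64_t status = read_status();

            while (status) {
                    uint64_t ack = status;
                    int bit;

                    for (bit = 0; bit < nr_counters; bit++)
                            if (status & (1ULL << bit))
                                    handle_overflow(bit);

                    ack_status(ack);
                    status = read_status();  /* new overflows may have hit */
            }
    }
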
 
 void smp_perf_counter_interrupt(struct pt_regs *regs)
 {
        irq_enter();
-#ifdef CONFIG_X86_64
-       add_pda(apic_perf_irqs, 1);
-#else
-       per_cpu(irq_stat, smp_processor_id()).apic_perf_irqs++;
-#endif
+       inc_irq_stat(apic_perf_irqs);
        apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
        __smp_perf_counter_interrupt(regs, 0);
 
@@ -479,7 +468,7 @@ void perf_counter_notify(struct pt_regs *regs)
        cpu = smp_processor_id();
        cpuc = &per_cpu(cpu_hw_counters, cpu);
 
-       for_each_bit(bit, cpuc->used, nr_hw_counters) {
+       for_each_bit(bit, cpuc->used, X86_PMC_IDX_MAX) {
                struct perf_counter *counter = cpuc->counters[bit];
 
                if (!counter)
@@ -538,8 +527,9 @@ static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
 void __init init_hw_perf_counters(void)
 {
        union cpuid10_eax eax;
-       unsigned int unused;
        unsigned int ebx;
+       unsigned int unused;
+       union cpuid10_edx edx;
 
        if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
                return;
@@ -548,28 +538,62 @@ void __init init_hw_perf_counters(void)
         * Check whether the Architectural PerfMon supports
         * Branch Misses Retired Event or not.
         */
-       cpuid(10, &(eax.full), &ebx, &unused, &unused);
+       cpuid(10, &eax.full, &ebx, &unused, &edx.full);
        if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
                return;
 
        printk(KERN_INFO "Intel Performance Monitoring support detected.\n");
 
-       printk(KERN_INFO "... version:      %d\n", eax.split.version_id);
-       printk(KERN_INFO "... num_counters: %d\n", eax.split.num_counters);
-       nr_hw_counters = eax.split.num_counters;
-       if (nr_hw_counters > MAX_HW_COUNTERS) {
-               nr_hw_counters = MAX_HW_COUNTERS;
+       printk(KERN_INFO "... version:         %d\n", eax.split.version_id);
+       printk(KERN_INFO "... num counters:    %d\n", eax.split.num_counters);
+       nr_counters_generic = eax.split.num_counters;
+       if (nr_counters_generic > X86_PMC_MAX_GENERIC) {
+               nr_counters_generic = X86_PMC_MAX_GENERIC;
                WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
-                       nr_hw_counters, MAX_HW_COUNTERS);
+                       nr_counters_generic, X86_PMC_MAX_GENERIC);
        }
-       perf_counter_mask = (1 << nr_hw_counters) - 1;
-       perf_max_counters = nr_hw_counters;
+       perf_counter_mask = (1 << nr_counters_generic) - 1;
+       perf_max_counters = nr_counters_generic;
+
+       printk(KERN_INFO "... bit width:       %d\n", eax.split.bit_width);
+       printk(KERN_INFO "... mask length:     %d\n", eax.split.mask_length);
 
-       printk(KERN_INFO "... bit_width:    %d\n", eax.split.bit_width);
-       printk(KERN_INFO "... mask_length:  %d\n", eax.split.mask_length);
+       nr_counters_fixed = edx.split.num_counters_fixed;
+       if (nr_counters_fixed > X86_PMC_MAX_FIXED) {
+               nr_counters_fixed = X86_PMC_MAX_FIXED;
+               WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
+                       nr_counters_fixed, X86_PMC_MAX_FIXED);
+       }
+       printk(KERN_INFO "... fixed counters:  %d\n", nr_counters_fixed);
+
+       perf_counter_mask |= ((1LL << nr_counters_fixed)-1) << X86_PMC_IDX_FIXED;
+
+       printk(KERN_INFO "... counter mask:    %016Lx\n", perf_counter_mask);
+       perf_counters_initialized = true;
 
        perf_counters_lapic_init(0);
        register_die_notifier(&perf_counter_nmi_notifier);
+}
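
The resulting mask carries both counter classes: generic counters in
the low bits, fixed-function counters starting at X86_PMC_IDX_FIXED
(assumed to be bit 32 in this sketch). A worked example:

    #include <stdint.h>
    #include <stdio.h>

    #define PMC_IDX_FIXED 32        /* assumed X86_PMC_IDX_FIXED */

    int main(void)
    {
            int nr_generic = 2, nr_fixed = 3;   /* e.g. a Core 2 era PMU */
            uint64_t mask;

            mask  = (1ULL << nr_generic) - 1;
            mask |= ((1ULL << nr_fixed) - 1) << PMC_IDX_FIXED;

            /* prints "counter mask: 0000000700000003" */
            printf("counter mask: %016llx\n", (unsigned long long)mask);
            return 0;
    }
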
 
-       perf_counters_initialized = true;
+static void pmc_generic_read(struct perf_counter *counter)
+{
+       x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
+}
+
+static const struct hw_perf_counter_ops x86_perf_counter_ops = {
+       .enable         = pmc_generic_enable,
+       .disable        = pmc_generic_disable,
+       .read           = pmc_generic_read,
+};
+
+const struct hw_perf_counter_ops *
+hw_perf_counter_init(struct perf_counter *counter)
+{
+       int err;
+
+       err = __hw_perf_counter_init(counter);
+       if (err)
+               return NULL;
+
+       return &x86_perf_counter_ops;
 }
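
Generic code now drives the counter exclusively through the returned
ops table. An illustrative call sequence, assuming the core stores the
returned pointer as counter->hw_ops (the field name is an assumption
here):

    static int run_counter(struct perf_counter *counter)
    {
            int err = counter->hw_ops->enable(counter);  /* grab a PMC */
            if (err)
                    return err;             /* -EAGAIN: no slot free */

            /* ... counting ... */

            counter->hw_ops->read(counter);     /* fold delta into ->count */
            counter->hw_ops->disable(counter);  /* release the PMC slot */
            return 0;
    }
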