perf, x86: Implement simple LBR support
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 977e7544738c5d657c50370856a94569b6bc0df2..44f6ed42a934a1c61a96a569379e515bfe9e67e1 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1,7 +1,7 @@
 #ifdef CONFIG_CPU_SUP_INTEL
 
 /*
- * Intel PerfMon v3. Used on Core2 and later.
+ * Intel PerfMon, used on Core and later.
  */
 static const u64 intel_perfmon_event_map[] =
 {
@@ -27,8 +27,14 @@ static struct event_constraint intel_core_event_constraints[] =
 
 static struct event_constraint intel_core2_event_constraints[] =
 {
-       FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
-       FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
+       FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+       FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+       /*
+        * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event
+        * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed
+        * ratio between these counters.
+        */
+       /* FIXED_EVENT_CONSTRAINT(0x013c, 2),  CPU_CLK_UNHALTED.REF */
        INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
        INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
        INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
@@ -37,14 +43,16 @@ static struct event_constraint intel_core2_event_constraints[] =
        INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
        INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
        INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
+       INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
        INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
        EVENT_CONSTRAINT_END
 };
 
 static struct event_constraint intel_nehalem_event_constraints[] =
 {
-       FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
-       FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
+       FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+       FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+       /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
        INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
        INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
        INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
@@ -58,8 +66,9 @@ static struct event_constraint intel_nehalem_event_constraints[] =
 
 static struct event_constraint intel_westmere_event_constraints[] =
 {
-       FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
-       FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
+       FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+       FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+       /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
        INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
        INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
        INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
@@ -68,8 +77,9 @@ static struct event_constraint intel_westmere_event_constraints[] =
 
 static struct event_constraint intel_gen_event_constraints[] =
 {
-       FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
-       FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
+       FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+       FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+       /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
        EVENT_CONSTRAINT_END
 };
 
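The constraint-table hunks above switch FIXED_EVENT_CONSTRAINT() from taking a pre-shifted counter mask to taking the fixed-counter index directly. The macro itself is defined outside this file; a minimal sketch of the mapping it presumably performs (fixed counter n sits at bit 32+n of the generic counter bitmask):

        #define FIXED_EVENT_CONSTRAINT(c, n)    \
                EVENT_CONSTRAINT(c, (1ULL << (32 + (n))), INTEL_ARCH_FIXED_MASK)

This keeps the tables readable and stops every entry from open-coding (0x3|(1ULL<<32))-style masks.
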
@@ -460,42 +470,6 @@ static u64 intel_pmu_raw_event(u64 hw_event)
        return hw_event & CORE_EVNTSEL_MASK;
 }
 
-static void intel_pmu_enable_bts(u64 config)
-{
-       unsigned long debugctlmsr;
-
-       debugctlmsr = get_debugctlmsr();
-
-       debugctlmsr |= X86_DEBUGCTL_TR;
-       debugctlmsr |= X86_DEBUGCTL_BTS;
-       debugctlmsr |= X86_DEBUGCTL_BTINT;
-
-       if (!(config & ARCH_PERFMON_EVENTSEL_OS))
-               debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;
-
-       if (!(config & ARCH_PERFMON_EVENTSEL_USR))
-               debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;
-
-       update_debugctlmsr(debugctlmsr);
-}
-
-static void intel_pmu_disable_bts(void)
-{
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-       unsigned long debugctlmsr;
-
-       if (!cpuc->ds)
-               return;
-
-       debugctlmsr = get_debugctlmsr();
-
-       debugctlmsr &=
-               ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
-                 X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);
-
-       update_debugctlmsr(debugctlmsr);
-}
-
 static void intel_pmu_disable_all(void)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -504,6 +478,9 @@ static void intel_pmu_disable_all(void)
 
        if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
                intel_pmu_disable_bts();
+
+       intel_pmu_pebs_disable_all();
+       intel_pmu_lbr_disable_all();
 }
 
 static void intel_pmu_enable_all(void)
@@ -521,6 +498,9 @@ static void intel_pmu_enable_all(void)
 
                intel_pmu_enable_bts(event->hw.config);
        }
+
+       intel_pmu_pebs_enable_all();
+       intel_pmu_lbr_enable_all();
 }
 
 static inline u64 intel_pmu_get_status(void)
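intel_pmu_lbr_{enable,disable}_all() live in the new perf_event_intel_lbr.c, which this diff does not show. A plausible sketch of the enable path, assuming a cpuc->lbr_users refcount and the DEBUGCTLMSR_* bit names from asm/msr-index.h:

        static void __intel_pmu_lbr_enable(void)
        {
                u64 debugctl;

                rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
                /* freeze the LBR stack on PMI so the sampled trail survives */
                debugctl |= (DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
                wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
        }

        static void intel_pmu_lbr_enable_all(void)
        {
                struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

                /* avoid the MSR write when nobody uses the LBR */
                if (cpuc->lbr_users)
                        __intel_pmu_lbr_enable();
        }

The disable path would clear the same DEBUGCTL bits under the same refcount check.
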
@@ -537,10 +517,9 @@ static inline void intel_pmu_ack_status(u64 ack)
        wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
 }
 
-static inline void
-intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
+static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
 {
-       int idx = __idx - X86_PMC_IDX_FIXED;
+       int idx = hwc->idx - X86_PMC_IDX_FIXED;
        u64 ctrl_val, mask;
 
        mask = 0xfULL << (idx * 4);
@@ -550,88 +529,30 @@ intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
        (void)checking_wrmsrl(hwc->config_base, ctrl_val);
 }
 
-static void intel_pmu_drain_bts_buffer(void)
+static void intel_pmu_disable_event(struct perf_event *event)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-       struct debug_store *ds = cpuc->ds;
-       struct bts_record {
-               u64     from;
-               u64     to;
-               u64     flags;
-       };
-       struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
-       struct bts_record *at, *top;
-       struct perf_output_handle handle;
-       struct perf_event_header header;
-       struct perf_sample_data data;
-       struct pt_regs regs;
-
-       if (!event)
-               return;
-
-       if (!ds)
-               return;
-
-       at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
-       top = (struct bts_record *)(unsigned long)ds->bts_index;
-
-       if (top <= at)
-               return;
-
-       ds->bts_index = ds->bts_buffer_base;
-
-
-       data.period     = event->hw.last_period;
-       data.addr       = 0;
-       data.raw        = NULL;
-       regs.ip         = 0;
-
-       /*
-        * Prepare a generic sample, i.e. fill in the invariant fields.
-        * We will overwrite the from and to address before we output
-        * the sample.
-        */
-       perf_prepare_sample(&header, &data, event, &regs);
-
-       if (perf_output_begin(&handle, event,
-                             header.size * (top - at), 1, 1))
-               return;
-
-       for (; at < top; at++) {
-               data.ip         = at->from;
-               data.addr       = at->to;
-
-               perf_output_sample(&handle, &header, &data, event);
-       }
-
-       perf_output_end(&handle);
-
-       /* There's new data available. */
-       event->hw.interrupts++;
-       event->pending_kill = POLL_IN;
-}
+       struct hw_perf_event *hwc = &event->hw;
 
-static inline void
-intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
-{
-       if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
+       if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
                intel_pmu_disable_bts();
                intel_pmu_drain_bts_buffer();
                return;
        }
 
        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
-               intel_pmu_disable_fixed(hwc, idx);
+               intel_pmu_disable_fixed(hwc);
                return;
        }
 
-       x86_pmu_disable_event(hwc, idx);
+       x86_pmu_disable_event(event);
+
+       if (unlikely(event->attr.precise))
+               intel_pmu_pebs_disable(hwc);
 }
 
-static inline void
-intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
+static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
 {
-       int idx = __idx - X86_PMC_IDX_FIXED;
+       int idx = hwc->idx - X86_PMC_IDX_FIXED;
        u64 ctrl_val, bits, mask;
        int err;
 
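The new event->attr.precise paths in intel_pmu_{disable,enable}_event() call intel_pmu_pebs_{enable,disable}(), defined in the companion perf_event_intel_ds.c. A sketch of the enable side, treating the cpuc->pebs_enabled shadow mask as an assumption:

        static void intel_pmu_pebs_enable(struct hw_perf_event *hwc)
        {
                struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

                /*
                 * PEBS raises its own assist; the regular counter
                 * overflow interrupt is not wanted for this event.
                 */
                hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;

                cpuc->pebs_enabled |= 1ULL << hwc->idx;
                wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
        }
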
@@ -661,9 +582,11 @@ intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
        err = checking_wrmsrl(hwc->config_base, ctrl_val);
 }
 
-static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static void intel_pmu_enable_event(struct perf_event *event)
 {
-       if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
+       struct hw_perf_event *hwc = &event->hw;
+
+       if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
                if (!__get_cpu_var(cpu_hw_events).enabled)
                        return;
 
@@ -672,11 +595,14 @@ static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
        }
 
        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
-               intel_pmu_enable_fixed(hwc, idx);
+               intel_pmu_enable_fixed(hwc);
                return;
        }
 
-       __x86_pmu_enable_event(hwc, idx);
+       if (unlikely(event->attr.precise))
+               intel_pmu_pebs_enable(hwc);
+
+       __x86_pmu_enable_event(hwc);
 }
 
 /*
@@ -685,14 +611,8 @@ static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
  */
 static int intel_pmu_save_and_restart(struct perf_event *event)
 {
-       struct hw_perf_event *hwc = &event->hw;
-       int idx = hwc->idx;
-       int ret;
-
-       x86_perf_event_update(event, hwc, idx);
-       ret = x86_perf_event_set_period(event, hwc, idx);
-
-       return ret;
+       x86_perf_event_update(event);
+       return x86_perf_event_set_period(event);
 }
 
 static void intel_pmu_reset(void)
@@ -732,16 +652,15 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
        int bit, loops;
        u64 ack, status;
 
-       data.addr = 0;
-       data.raw = NULL;
+       perf_sample_data_init(&data, 0);
 
        cpuc = &__get_cpu_var(cpu_hw_events);
 
-       perf_disable();
+       intel_pmu_disable_all();
        intel_pmu_drain_bts_buffer();
        status = intel_pmu_get_status();
        if (!status) {
-               perf_enable();
+               intel_pmu_enable_all();
                return 0;
        }
 
@@ -751,16 +670,23 @@ again:
                WARN_ONCE(1, "perfevents: irq loop stuck!\n");
                perf_event_print_debug();
                intel_pmu_reset();
-               perf_enable();
-               return 1;
+               goto done;
        }
 
        inc_irq_stat(apic_perf_irqs);
        ack = status;
+
+       intel_pmu_lbr_read();
+
+       /*
+        * PEBS overflow sets bit 62 in the global status register
+        */
+       if (__test_and_clear_bit(62, (unsigned long *)&status))
+               x86_pmu.drain_pebs(regs);
+
        for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
                struct perf_event *event = cpuc->events[bit];
 
-               clear_bit(bit, (unsigned long *) &status);
                if (!test_bit(bit, cpuc->active_mask))
                        continue;
 
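intel_pmu_lbr_read() snapshots the LBR MSR stack into per-CPU storage before the overflow bits are processed, so the branch trail is not clobbered by the handler itself. A sketch for the 64-bit record format, assuming x86_pmu.lbr_{nr,from,to} geometry fields and a cpuc->lbr_entries[] array provided by the companion file:

        static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
        {
                unsigned long mask = x86_pmu.lbr_nr - 1; /* lbr_nr is a power of two */
                u64 tos = intel_pmu_lbr_tos();           /* top-of-stack index */
                int i;

                for (i = 0; i < x86_pmu.lbr_nr; i++) {
                        unsigned long lbr_idx = (tos - i) & mask;
                        u64 from, to;

                        /* walk back from the most recent branch record */
                        rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
                        rdmsrl(x86_pmu.lbr_to   + lbr_idx, to);

                        cpuc->lbr_entries[i].from = from;
                        cpuc->lbr_entries[i].to   = to;
                }
                cpuc->lbr_stack.nr = i;
        }
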
@@ -770,7 +696,7 @@ again:
                data.period = event->hw.last_period;
 
                if (perf_event_overflow(event, 1, &data, regs))
-                       intel_pmu_disable_event(&event->hw, bit);
+                       x86_pmu_stop(event);
        }
 
        intel_pmu_ack_status(ack);
@@ -782,27 +708,23 @@ again:
        if (status)
                goto again;
 
-       perf_enable();
-
+done:
+       intel_pmu_enable_all();
        return 1;
 }
 
-static struct event_constraint bts_constraint =
-       EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
-
 static struct event_constraint *
-intel_special_constraints(struct perf_event *event)
+intel_bts_constraints(struct perf_event *event)
 {
-       unsigned int hw_event;
-
-       hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK;
+       struct hw_perf_event *hwc = &event->hw;
+       unsigned int hw_event, bts_event;
 
-       if (unlikely((hw_event ==
-                     x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
-                    (event->hw.sample_period == 1))) {
+       hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
+       bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
 
+       if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
                return &bts_constraint;
-       }
+
        return NULL;
 }
 
@@ -811,7 +733,11 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event
 {
        struct event_constraint *c;
 
-       c = intel_special_constraints(event);
+       c = intel_bts_constraints(event);
+       if (c)
+               return c;
+
+       c = intel_pebs_constraints(event);
        if (c)
                return c;
 
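intel_pebs_constraints() is likewise supplied by perf_event_intel_ds.c. It plausibly matches precise events against a per-model table of PEBS-capable encodings; the x86_pmu.pebs_constraints field and the emptyconstraint fallback are assumptions:

        static struct event_constraint *
        intel_pebs_constraints(struct perf_event *event)
        {
                struct event_constraint *c;

                if (!event->attr.precise)
                        return NULL;

                if (x86_pmu.pebs_constraints) {
                        for_each_event_constraint(c, x86_pmu.pebs_constraints) {
                                if ((event->hw.config & c->cmask) == c->code)
                                        return c;
                        }
                }

                /* precise requested, but no PEBS-capable encoding: refuse */
                return &emptyconstraint;
        }
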
@@ -860,9 +786,10 @@ static __initconst struct x86_pmu intel_pmu = {
         * the generic event period:
         */
        .max_period             = (1ULL << 31) - 1,
-       .enable_bts             = intel_pmu_enable_bts,
-       .disable_bts            = intel_pmu_disable_bts,
-       .get_event_constraints  = intel_get_event_constraints
+       .get_event_constraints  = intel_get_event_constraints,
+
+       .cpu_starting           = init_debug_store_on_cpu,
+       .cpu_dying              = fini_debug_store_on_cpu,
 };
 
 static __init int intel_pmu_init(void)
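The new cpu_starting/cpu_dying hooks point each CPU at its per-CPU debug-store area, which backs both BTS and PEBS. A minimal sketch of the init side, assuming the ds buffer was allocated elsewhere:

        static void init_debug_store_on_cpu(int cpu)
        {
                struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

                if (!ds)
                        return;

                /* tell the CPU where its BTS/PEBS records should land */
                wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
                             (u32)((u64)(unsigned long)ds),
                             (u32)((u64)(unsigned long)ds >> 32));
        }
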
@@ -908,6 +835,8 @@ static __init int intel_pmu_init(void)
        if (version > 1)
                x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);
 
+       intel_ds_init();
+
        /*
         * Install the hw-cache-events table:
         */
@@ -923,6 +852,8 @@ static __init int intel_pmu_init(void)
                memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
 
+               intel_pmu_lbr_init_core();
+
                x86_pmu.event_constraints = intel_core2_event_constraints;
                pr_cont("Core2 events, ");
                break;
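The per-model intel_pmu_lbr_init_*() calls record the LBR geometry for each part. A sketch of the Core 2 variant, with MSR addresses as given in the SDM (the x86_pmu.lbr_* fields are assumptions about the companion file):

        static void intel_pmu_lbr_init_core(void)
        {
                x86_pmu.lbr_nr   = 4;           /* four FROM/TO pairs on Core 2 */
                x86_pmu.lbr_tos  = 0x01c9;      /* MSR_LBR_TOS */
                x86_pmu.lbr_from = 0x40;        /* MSR_LBR_CORE_FROM */
                x86_pmu.lbr_to   = 0x60;        /* MSR_LBR_CORE_TO */
        }

The Nehalem/Westmere variant would differ only in the numbers: 16 entries based at 0x680/0x6c0.
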
@@ -932,13 +863,18 @@ static __init int intel_pmu_init(void)
                memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
 
+               intel_pmu_lbr_init_nhm();
+
                x86_pmu.event_constraints = intel_nehalem_event_constraints;
                pr_cont("Nehalem/Corei7 events, ");
                break;
-       case 28:
+
+       case 28: /* Atom */
                memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
 
+               intel_pmu_lbr_init_atom();
+
                x86_pmu.event_constraints = intel_gen_event_constraints;
                pr_cont("Atom events, ");
                break;
@@ -948,9 +884,12 @@ static __init int intel_pmu_init(void)
                memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
 
+               intel_pmu_lbr_init_nhm();
+
                x86_pmu.event_constraints = intel_westmere_event_constraints;
                pr_cont("Westmere events, ");
                break;
+
        default:
                /*
                 * default constraints for v2 and up