perf/x86: Add 'index' param to get_event_constraint() callback
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index fc6dbc46af4a5107e6a8cb8ab7a77facaa4c9437..2dd34b57d3ff610f16d3403c7350fb1eeeb2f010 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -212,11 +212,11 @@ static struct event_constraint intel_hsw_event_constraints[] = {
        INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
        INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
        /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
-       INTEL_EVENT_CONSTRAINT(0x08a3, 0x4),
+       INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
        /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
-       INTEL_EVENT_CONSTRAINT(0x0ca3, 0x4),
+       INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
        /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
-       INTEL_EVENT_CONSTRAINT(0x04a3, 0xf),
+       INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
        EVENT_CONSTRAINT_END
 };
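
A note on this first hunk: all three CYCLE_ACTIVITY events share event code 0xa3 and differ only in their umask, so a constraint that matches on the event code alone would smear one counter mask across all of them. Switching to INTEL_UEVENT_CONSTRAINT() folds the umask into the match. A minimal sketch of the difference, assuming the usual macro shapes from perf_event.h (the definitions themselves are not part of this diff):

	/* match on the event code only */
	#define INTEL_EVENT_CONSTRAINT(c, n)	\
		EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

	/* match on event code + umask */
	#define INTEL_UEVENT_CONSTRAINT(c, n)	\
		EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)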
 
@@ -1242,6 +1242,8 @@ static void intel_pmu_disable_all(void)
 
        if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
                intel_pmu_disable_bts();
+       else
+               intel_bts_disable_local();
 
        intel_pmu_pebs_disable_all();
        intel_pmu_lbr_disable_all();
@@ -1264,7 +1266,8 @@ static void intel_pmu_enable_all(int added)
                        return;
 
                intel_pmu_enable_bts(event->hw.config);
-       }
+       } else
+               intel_bts_enable_local();
 }
 
 /*
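
The two hunks above pair the legacy fixed-counter BTS path with new hooks into the standalone BTS driver: when the fixed BTS counter is not in use, intel_bts_disable_local()/intel_bts_enable_local() stop and restart the driver-managed BTS context across a PMU-wide disable/enable, and the handler hunk below drains its buffer via intel_bts_interrupt(). The hooks themselves live outside this file (the perf_event_intel_bts.c driver); prototypes as inferred from the call sites, so treat them as assumptions:

	void intel_bts_enable_local(void);
	void intel_bts_disable_local(void);
	int  intel_bts_interrupt(void);	/* nonzero when it handled an overflow */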
@@ -1550,6 +1553,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
                apic_write(APIC_LVTPC, APIC_DM_NMI);
        intel_pmu_disable_all();
        handled = intel_pmu_drain_bts_buffer();
+       handled += intel_bts_interrupt();
        status = intel_pmu_get_status();
        if (!status)
                goto done;
@@ -1589,6 +1593,14 @@ again:
                x86_pmu.drain_pebs(regs);
        }
 
+       /*
+        * Intel PT
+        */
+       if (__test_and_clear_bit(55, (unsigned long *)&status)) {
+               handled++;
+               intel_pt_interrupt();
+       }
+
        /*
         * Checkpointed counters can lead to 'spurious' PMIs because the
         * rollback caused by the PMI will have cleared the overflow status
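
On the magic 55 above: bit 55 of IA32_PERF_GLOBAL_STATUS is the Trace_ToPA_PMI bit, raised when an Intel PT ToPA entry with its INT flag set fills up; the handler claims it and forwards to the PT driver. A named constant would read better than the bare bit number; a possible follow-up, where the name is an assumption rather than something this diff defines:

	#define GLOBAL_STATUS_TRACE_TOPA_PMI_BIT	55
	#define GLOBAL_STATUS_TRACE_TOPA_PMI		BIT_ULL(GLOBAL_STATUS_TRACE_TOPA_PMI_BIT)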
@@ -1655,7 +1667,7 @@ intel_bts_constraints(struct perf_event *event)
 
 static int intel_alt_er(int idx)
 {
-       if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
+       if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
                return idx;
 
        if (idx == EXTRA_REG_RSP_0)
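
This hunk, and the matching ones in intel_pmu_init() further down, rename x86_pmu.er_flags to the general x86_pmu.flags, with the ERF_* bits becoming PMU_FL_*: the field is no longer specific to extra registers and can carry other PMU-wide capability bits. Assumed shape of the renamed flags (the values are an assumption; only the names appear in this diff):

	#define PMU_FL_NO_HT_SHARING	0x1	/* no hyper-threading sharing of extra regs */
	#define PMU_FL_HAS_RSP_1	0x2	/* has two equivalent OFFCORE_RSP registers */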
@@ -1815,7 +1827,8 @@ intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
 }
 
 struct event_constraint *
-x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
+x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+                         struct perf_event *event)
 {
        struct event_constraint *c;
 
@@ -1832,7 +1845,8 @@ x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
 }
 
 static struct event_constraint *
-intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
+intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+                           struct perf_event *event)
 {
        struct event_constraint *c;
 
@@ -1840,15 +1854,15 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event
        if (c)
                return c;
 
-       c = intel_pebs_constraints(event);
+       c = intel_shared_regs_constraints(cpuc, event);
        if (c)
                return c;
 
-       c = intel_shared_regs_constraints(cpuc, event);
+       c = intel_pebs_constraints(event);
        if (c)
                return c;
 
-       return x86_get_event_constraints(cpuc, event);
+       return x86_get_event_constraints(cpuc, idx, event);
 }
 
 static void
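
This is the change the subject line names: every get_event_constraint() implementation now takes an idx argument, the event's slot in cpuc->event_list[] (with -1 expected from callers that have no real slot, such as validation against a fake cpuc), so constraint code can correlate an event with per-slot scheduling state. Note the hunk below also reorders intel_shared_regs_constraints() ahead of intel_pebs_constraints(). A hypothetical caller, for illustration only (the real scheduler-side call site is outside this diff, and the cpuc->event_constraint[] field is an assumption):

	/* sketch: hand each event its position when collecting constraints */
	for (i = 0; i < cpuc->n_events; i++) {
		struct event_constraint *c;

		c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]);
		cpuc->event_constraint[i] = c;
	}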
@@ -1942,6 +1956,17 @@ static int intel_pmu_hw_config(struct perf_event *event)
                ret = intel_pmu_setup_lbr_filter(event);
                if (ret)
                        return ret;
+
+               /*
+                * BTS is set up earlier in this path, so don't account twice
+                */
+               if (!intel_pmu_has_bts(event)) {
+                       /* disallow lbr if conflicting events are present */
+                       if (x86_add_exclusive(x86_lbr_exclusive_lbr))
+                               return -EBUSY;
+
+                       event->destroy = hw_perf_lbr_event_destroy;
+               }
        }
 
        if (event->attr.type != PERF_TYPE_RAW)
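
LBR, BTS, and Intel PT contend for the same branch-tracing hardware, so an LBR user now takes an exclusive reference up front (x86_add_exclusive() failing when a conflicting class is already active) and drops it through the destroy callback. The helper and its classes are defined elsewhere in the tree; an assumed enum, named after this call site:

	enum x86_lbr_exclusive {
		x86_lbr_exclusive_lbr,
		x86_lbr_exclusive_bts,
		x86_lbr_exclusive_pt,
		x86_lbr_exclusive_max
	};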
@@ -2082,9 +2107,12 @@ static struct event_constraint counter2_constraint =
                        EVENT_CONSTRAINT(0, 0x4, 0);
 
 static struct event_constraint *
-hsw_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
+hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+                         struct perf_event *event)
 {
-       struct event_constraint *c = intel_get_event_constraints(cpuc, event);
+       struct event_constraint *c;
+
+       c = intel_get_event_constraints(cpuc, idx, event);
 
        /* Handle special quirk on in_tx_checkpointed only in counter 2 */
        if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
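
counter2_constraint above is EVENT_CONSTRAINT(0, 0x4, 0): counter mask 0x4 has only bit 2 set, restricting an event to general-purpose counter 2. The body of the quirk is cut off by the hunk's context window; a sketch of its expected shape, an assumption rather than the committed code:

	/* inside the HSW_IN_TX_CHECKPOINTED branch shown above */
	if (c->idxmsk64 & (1ULL << 2))
		return &counter2_constraint;
	return &emptyconstraint;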
@@ -2227,13 +2255,15 @@ static void intel_pmu_cpu_starting(int cpu)
        if (!cpuc->shared_regs)
                return;
 
-       if (!(x86_pmu.er_flags & ERF_NO_HT_SHARING)) {
+       if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
+               void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];
+
                for_each_cpu(i, topology_thread_cpumask(cpu)) {
                        struct intel_shared_regs *pc;
 
                        pc = per_cpu(cpu_hw_events, i).shared_regs;
                        if (pc && pc->core_id == core_id) {
-                               cpuc->kfree_on_online = cpuc->shared_regs;
+                               *onln = cpuc->shared_regs;
                                cpuc->shared_regs = pc;
                                break;
                        }
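
cpuc->kfree_on_online grows from a single pointer into a small array, so more than one allocation can be parked for freeing once the hotplug notifier reaches CPU_ONLINE; X86_PERF_KFREE_SHARED indexes the slot used for the shared_regs buffer here. Assumed index space (declared outside this diff; everything beyond the SHARED entry is a guess):

	enum {
		X86_PERF_KFREE_SHARED = 0,
		X86_PERF_KFREE_EXCL,
		X86_PERF_KFREE_MAX
	};

	/* in struct cpu_hw_events */
	void	*kfree_on_online[X86_PERF_KFREE_MAX];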
@@ -2648,7 +2678,7 @@ __init int intel_pmu_init(void)
                x86_pmu.event_constraints = intel_slm_event_constraints;
                x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
                x86_pmu.extra_regs = intel_slm_extra_regs;
-               x86_pmu.er_flags |= ERF_HAS_RSP_1;
+               x86_pmu.flags |= PMU_FL_HAS_RSP_1;
                pr_cont("Silvermont events, ");
                break;
 
@@ -2666,7 +2696,7 @@ __init int intel_pmu_init(void)
                x86_pmu.enable_all = intel_pmu_nhm_enable_all;
                x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
                x86_pmu.extra_regs = intel_westmere_extra_regs;
-               x86_pmu.er_flags |= ERF_HAS_RSP_1;
+               x86_pmu.flags |= PMU_FL_HAS_RSP_1;
 
                x86_pmu.cpu_events = nhm_events_attrs;
 
@@ -2698,8 +2728,8 @@ __init int intel_pmu_init(void)
                else
                        x86_pmu.extra_regs = intel_snb_extra_regs;
                /* all extra regs are per-cpu when HT is on */
-               x86_pmu.er_flags |= ERF_HAS_RSP_1;
-               x86_pmu.er_flags |= ERF_NO_HT_SHARING;
+               x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+               x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
 
                x86_pmu.cpu_events = snb_events_attrs;
 
@@ -2733,8 +2763,8 @@ __init int intel_pmu_init(void)
                else
                        x86_pmu.extra_regs = intel_snb_extra_regs;
                /* all extra regs are per-cpu when HT is on */
-               x86_pmu.er_flags |= ERF_HAS_RSP_1;
-               x86_pmu.er_flags |= ERF_NO_HT_SHARING;
+               x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+               x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
 
                x86_pmu.cpu_events = snb_events_attrs;
 
@@ -2761,8 +2791,8 @@ __init int intel_pmu_init(void)
                x86_pmu.extra_regs = intel_snbep_extra_regs;
                x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
                /* all extra regs are per-cpu when HT is on */
-               x86_pmu.er_flags |= ERF_HAS_RSP_1;
-               x86_pmu.er_flags |= ERF_NO_HT_SHARING;
+               x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+               x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
 
                x86_pmu.hw_config = hsw_hw_config;
                x86_pmu.get_event_constraints = hsw_get_event_constraints;
@@ -2794,8 +2824,8 @@ __init int intel_pmu_init(void)
                x86_pmu.extra_regs = intel_snbep_extra_regs;
                x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
                /* all extra regs are per-cpu when HT is on */
-               x86_pmu.er_flags |= ERF_HAS_RSP_1;
-               x86_pmu.er_flags |= ERF_NO_HT_SHARING;
+               x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+               x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
 
                x86_pmu.hw_config = hsw_hw_config;
                x86_pmu.get_event_constraints = hsw_get_event_constraints;