perf/x86: Add 3 new scheduling callbacks
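
The diff below adds three optional, model-specific hooks to struct x86_pmu so
that a PMU implementation can bracket and observe counter scheduling:
start_scheduling() runs before a scheduling pass, commit_scheduling() is
called for each event once a counter has been picked for it, and
stop_scheduling() runs when the pass is over.  As a rough illustration only
(the callback fields and struct cpu_hw_events come from the diff; the
surrounding function and its loop are simplified placeholders, not the
kernel's actual scheduler), the generic code would invoke them along these
lines:

/*
 * Hypothetical sketch: wrapping a scheduling pass with the three new
 * optional callbacks.  Constraint solving is omitted; the counter choice
 * below is a placeholder.
 */
static void sketch_schedule_events(struct cpu_hw_events *cpuc, int n)
{
	int i;

	if (x86_pmu.start_scheduling)
		x86_pmu.start_scheduling(cpuc);

	for (i = 0; i < n; i++) {
		int cntr = i;	/* placeholder counter choice */

		if (x86_pmu.commit_scheduling)
			x86_pmu.commit_scheduling(cpuc, cpuc->event_list[i], cntr);
	}

	if (x86_pmu.stop_scheduling)
		x86_pmu.stop_scheduling(cpuc);
}

All three hooks are optional, so callers are expected to test the function
pointer before invoking it.
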
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index df525d2be1e814766ac96a82ee3caf33cc4316e4..ea27e63fc945c74e16517801ce0f04431ff9dc80 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -125,6 +125,12 @@ struct intel_shared_regs {
 
 #define MAX_LBR_ENTRIES                16
 
+enum {
+       X86_PERF_KFREE_SHARED = 0,
+       X86_PERF_KFREE_EXCL   = 1,
+       X86_PERF_KFREE_MAX
+};
+
 struct cpu_hw_events {
        /*
         * Generic x86 PMC bits
@@ -187,7 +193,7 @@ struct cpu_hw_events {
        /* Inverted mask of bits to clear in the perf_ctr ctrl registers */
        u64                             perf_ctr_virt_mask;
 
-       void                            *kfree_on_online;
+       void                            *kfree_on_online[X86_PERF_KFREE_MAX];
 };
 
 #define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\
@@ -408,6 +414,13 @@ union x86_pmu_config {
 
 #define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
 
+enum {
+       x86_lbr_exclusive_lbr,
+       x86_lbr_exclusive_bts,
+       x86_lbr_exclusive_pt,
+       x86_lbr_exclusive_max,
+};
+
 /*
  * struct x86_pmu - generic x86 pmu
  */
@@ -447,10 +460,20 @@ struct x86_pmu {
 
        void            (*put_event_constraints)(struct cpu_hw_events *cpuc,
                                                 struct perf_event *event);
+
+       void            (*commit_scheduling)(struct cpu_hw_events *cpuc,
+                                            struct perf_event *event,
+                                            int cntr);
+
+       void            (*start_scheduling)(struct cpu_hw_events *cpuc);
+
+       void            (*stop_scheduling)(struct cpu_hw_events *cpuc);
+
        struct event_constraint *event_constraints;
        struct x86_pmu_quirk *quirks;
        int             perfctr_second_write;
        bool            late_ack;
+       unsigned        (*limit_period)(struct perf_event *event, unsigned l);
 
        /*
         * sysfs attrs
@@ -472,7 +495,8 @@ struct x86_pmu {
        void            (*cpu_dead)(int cpu);
 
        void            (*check_microcode)(void);
-       void            (*flush_branch_stack)(void);
+       void            (*sched_task)(struct perf_event_context *ctx,
+                                     bool sched_in);
 
        /*
         * Intel Arch Perfmon v2+
@@ -503,11 +527,16 @@ struct x86_pmu {
        const int       *lbr_sel_map;              /* lbr_select mappings */
        bool            lbr_double_abort;          /* duplicated lbr aborts */
 
+       /*
+        * Intel PT/LBR/BTS are exclusive
+        */
+       atomic_t        lbr_exclusive[x86_lbr_exclusive_max];
+
        /*
         * Extra registers for events
         */
        struct extra_reg *extra_regs;
-       unsigned int er_flags;
+       unsigned int flags;
 
        /*
         * Intel host/guest support (KVM)
@@ -515,6 +544,13 @@ struct x86_pmu {
        struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
 };
 
+struct x86_perf_task_context {
+       u64 lbr_from[MAX_LBR_ENTRIES];
+       u64 lbr_to[MAX_LBR_ENTRIES];
+       int lbr_callstack_users;
+       int lbr_stack_state;
+};
+
 #define x86_add_quirk(func_)                                           \
 do {                                                                   \
        static struct x86_pmu_quirk __quirk __initdata = {              \
@@ -524,8 +560,11 @@ do {                                                                       \
        x86_pmu.quirks = &__quirk;                                      \
 } while (0)
 
-#define ERF_NO_HT_SHARING      1
-#define ERF_HAS_RSP_1          2
+/*
+ * x86_pmu flags
+ */
+#define PMU_FL_NO_HT_SHARING   0x1 /* no hyper-threading resource sharing */
+#define PMU_FL_HAS_RSP_1       0x2 /* has 2 equivalent offcore_rsp regs   */
 
 #define EVENT_VAR(_id)  event_attr_##_id
 #define EVENT_PTR(_id) &event_attr_##_id.attr.attr
@@ -546,6 +585,12 @@ static struct perf_pmu_events_attr event_attr_##v = {                      \
 
 extern struct x86_pmu x86_pmu __read_mostly;
 
+static inline bool x86_pmu_has_lbr_callstack(void)
+{
+       return  x86_pmu.lbr_sel_map &&
+               x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
+}
+
 DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
 
 int x86_perf_event_set_period(struct perf_event *event);
@@ -588,6 +633,12 @@ static inline int x86_pmu_rdpmc_index(int index)
        return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
 }
 
+int x86_add_exclusive(unsigned int what);
+
+void x86_del_exclusive(unsigned int what);
+
+void hw_perf_lbr_event_destroy(struct perf_event *event);
+
 int x86_setup_perfctr(struct perf_event *event);
 
 int x86_pmu_hw_config(struct perf_event *event);
@@ -674,6 +725,29 @@ static inline int amd_pmu_init(void)
 
 #ifdef CONFIG_CPU_SUP_INTEL
 
+static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
+{
+       /* user explicitly requested branch sampling */
+       if (has_branch_stack(event))
+               return true;
+
+       /* implicit branch sampling to correct PEBS skid */
+       if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1 &&
+           x86_pmu.intel_cap.pebs_format < 2)
+               return true;
+
+       return false;
+}
+
+static inline bool intel_pmu_has_bts(struct perf_event *event)
+{
+       if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
+           !event->attr.freq && event->hw.sample_period == 1)
+               return true;
+
+       return false;
+}
+
 int intel_pmu_save_and_restart(struct perf_event *event);
 
 struct event_constraint *
@@ -727,6 +801,8 @@ void intel_pmu_pebs_disable_all(void);
 
 void intel_ds_init(void);
 
+void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);
+
 void intel_pmu_lbr_reset(void);
 
 void intel_pmu_lbr_enable(struct perf_event *event);
@@ -747,8 +823,18 @@ void intel_pmu_lbr_init_atom(void);
 
 void intel_pmu_lbr_init_snb(void);
 
+void intel_pmu_lbr_init_hsw(void);
+
 int intel_pmu_setup_lbr_filter(struct perf_event *event);
 
+void intel_pt_interrupt(void);
+
+int intel_bts_interrupt(void);
+
+void intel_bts_enable_local(void);
+
+void intel_bts_disable_local(void);
+
 int p4_pmu_init(void);
 
 int p6_pmu_init(void);
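
Besides the scheduling hooks, the diff adds an atomic lbr_exclusive[] array
and the x86_add_exclusive()/x86_del_exclusive() declarations, which arbitrate
between the mutually exclusive LBR, BTS and Intel PT facilities.  A minimal
sketch of what such helpers could look like, assuming only the declarations
above (the real implementation would also need to serialize concurrent
callers, which is omitted here):

/*
 * Hypothetical sketch: take a reference on one of the mutually exclusive
 * facilities (x86_lbr_exclusive_lbr/bts/pt), failing if a conflicting
 * facility is already in use.
 */
int x86_add_exclusive(unsigned int what)
{
	int i;

	/* Facility already active: just take another reference. */
	if (atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what]))
		return 0;

	/* Refuse if any other, conflicting facility is in use. */
	for (i = 0; i < x86_lbr_exclusive_max; i++) {
		if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
			return -EBUSY;
	}

	atomic_inc(&x86_pmu.lbr_exclusive[what]);
	return 0;
}

/* Drop a reference taken by x86_add_exclusive(). */
void x86_del_exclusive(unsigned int what)
{
	atomic_dec(&x86_pmu.lbr_exclusive[what]);
}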