perf/x86/intel: Reduce lbr_sel_map[] size
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 4f7a61ca4b393dc837cb4ad278c4a66306247cbd..33262004c31041c69b0706eb0b07bf37931b83b8 100644
@@ -202,6 +202,13 @@ struct pmu {
         */
        int (*event_init)               (struct perf_event *event);
 
+       /*
+        * Notification that the event was mapped or unmapped.  Called
+        * in the context of the mapping task.
+        */
+       void (*event_mapped)            (struct perf_event *event); /*optional*/
+       void (*event_unmapped)          (struct perf_event *event); /*optional*/
+
 #define PERF_EF_START  0x01            /* start the counter when adding    */
 #define PERF_EF_RELOAD 0x02            /* reload the counter when starting */
 #define PERF_EF_UPDATE 0x04            /* update the counter when stopping */
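
Both new hooks are optional and run in the context of the task doing the
mmap()/munmap(), which lets a PMU perform per-task setup when an event is
mapped into userspace (x86, for example, uses this to manage RDPMC access).
A minimal sketch of a PMU wiring them up; the my_pmu_* names and the
callback bodies are illustrative assumptions, not part of this patch:

    /* Hypothetical PMU using the new optional hooks. Mandatory
     * callbacks (.event_init, .add, .del, ...) are omitted. */
    static void my_pmu_event_mapped(struct perf_event *event)
    {
            /* Runs in the mmap()ing task's context, e.g. to flip a
             * per-task bit enabling user-space counter reads. */
    }

    static void my_pmu_event_unmapped(struct perf_event *event)
    {
            /* Undo the per-task state set up by my_pmu_event_mapped(). */
    }

    static struct pmu my_pmu = {
            .event_mapped   = my_pmu_event_mapped,   /* optional */
            .event_unmapped = my_pmu_event_unmapped, /* optional */
    };
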
@@ -450,11 +457,6 @@ struct perf_event {
 #endif /* CONFIG_PERF_EVENTS */
 };
 
-enum perf_event_context_type {
-       task_context,
-       cpu_context,
-};
-
 /**
  * struct perf_event_context - event context structure
  *
@@ -462,7 +464,6 @@ enum perf_event_context_type {
  */
 struct perf_event_context {
        struct pmu                      *pmu;
-       enum perf_event_context_type    type;
        /*
         * Protect the states of the events in the list,
         * nr_active, and the list:
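
The type field and the perf_event_context_type enum removed above go away
together: code that used to compare ctx->type can compare the contexts
themselves, which is a strictly stronger test. A hedged sketch of the kind
of check this enables (illustrative, not taken from this diff):

    /* Sketch: without ctx->type, "do these events share a context?"
     * reduces to a direct pointer comparison. */
    static bool same_context(struct perf_event *group_leader,
                             struct perf_event_context *ctx)
    {
            return group_leader->ctx == ctx;
    }
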
@@ -475,6 +476,7 @@ struct perf_event_context {
         */
        struct mutex                    mutex;
 
+       struct list_head                active_ctx_list;
        struct list_head                pinned_groups;
        struct list_head                flexible_groups;
        struct list_head                event_list;
@@ -525,7 +527,6 @@ struct perf_cpu_context {
        int                             exclusive;
        struct hrtimer                  hrtimer;
        ktime_t                         hrtimer_interval;
-       struct list_head                rotation_list;
        struct pmu                      *unique_pmu;
        struct perf_cgroup              *cgrp;
 };
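
Paired with the active_ctx_list member added to struct perf_event_context
above, dropping rotation_list moves the bookkeeping from "per-CPU contexts
that need rotating" to "event contexts currently active on this CPU", so
unthrottling and rotation can walk a single list. A sketch of the
scheduling-time maintenance this implies; the helper names and the per-CPU
list head are assumptions, not visible in this header diff:

    /* Per-CPU list of active contexts; list-head initialization at
     * CPU bring-up is elided. */
    static DEFINE_PER_CPU(struct list_head, active_ctx_list);

    /* Called with IRQs disabled when a context gains its first
     * active event. */
    static void perf_event_ctx_activate(struct perf_event_context *ctx)
    {
            list_add(&ctx->active_ctx_list, this_cpu_ptr(&active_ctx_list));
    }

    /* Called with IRQs disabled when a context loses its last
     * active event. */
    static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
    {
            list_del_init(&ctx->active_ctx_list);
    }
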
@@ -665,6 +666,7 @@ static inline int is_software_event(struct perf_event *event)
 
 extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
+extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
 extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
 
 #ifndef perf_arch_fetch_caller_regs
@@ -689,14 +691,25 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs)
 static __always_inline void
 perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 {
-       struct pt_regs hot_regs;
+       if (static_key_false(&perf_swevent_enabled[event_id]))
+               __perf_sw_event(event_id, nr, regs, addr);
+}
+
+DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);
 
+/*
+ * 'Special' version for the scheduler, it hard assumes no recursion,
+ * which is guaranteed by us not actually scheduling inside other swevents
+ * because those disable preemption.
+ */
+static __always_inline void
+perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
+{
        if (static_key_false(&perf_swevent_enabled[event_id])) {
-               if (!regs) {
-                       perf_fetch_caller_regs(&hot_regs);
-                       regs = &hot_regs;
-               }
-               __perf_sw_event(event_id, nr, regs, addr);
+               struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
+
+               perf_fetch_caller_regs(regs);
+               ___perf_sw_event(event_id, nr, regs, addr);
        }
 }
 
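
Two things change here: the common perf_sw_event() no longer reserves an
on-stack struct pt_regs in every caller's frame (and no longer accepts a
NULL regs that it would fill in itself), and the scheduler gets
perf_sw_event_sched(), which fetches caller regs into per-CPU storage.
That is safe only because, as the comment says, swevents disable
preemption, so the scheduler is never invoked from inside one; __perf_regs
is an array, presumably sized for nesting contexts, but this helper always
uses slot 0 since it assumes no recursion. Remaining perf_sw_event()
callers must therefore pass real register state, as in this illustrative
fault-handler-style sketch:

    /* An ordinary caller already holds the relevant pt_regs and
     * passes them through; NULL is no longer a valid regs argument. */
    void example_fault_hook(struct pt_regs *regs, unsigned long address)
    {
            perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
    }
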
@@ -712,7 +725,7 @@ static inline void perf_event_task_sched_in(struct task_struct *prev,
 static inline void perf_event_task_sched_out(struct task_struct *prev,
                                             struct task_struct *next)
 {
-       perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
+       perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
 
        if (static_key_false(&perf_sched_events.key))
                __perf_event_task_sched_out(prev, next);
@@ -823,6 +836,8 @@ static inline int perf_event_refresh(struct perf_event *event, int refresh)
 static inline void
 perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)    { }
 static inline void
+perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)                    { }
+static inline void
 perf_bp_event(struct perf_event *event, void *data)                    { }
 
 static inline int perf_register_guest_info_callbacks
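
The new empty stub follows the usual !CONFIG_PERF_EVENTS pattern: call
sites compile unchanged and the call folds away entirely. For instance
(illustrative):

    /* With CONFIG_PERF_EVENTS=n this compiles to nothing; the call
     * site needs no #ifdef. */
    static inline void example_sched_out_hook(void)
    {
            perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
    }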