perf evsel: Introduce perf_evsel__{in,ex}it
tools/perf/util/evsel.h
#ifndef __PERF_EVSEL_H
#define __PERF_EVSEL_H 1

#include <linux/list.h>
#include <stdbool.h>
#include "../../../include/linux/perf_event.h"
#include "types.h"
#include "xyarray.h"

struct perf_counts_values {
        union {
                struct {
                        u64 val;
                        u64 ena;
                        u64 run;
                };
                u64 values[3];
        };
};

struct perf_counts {
        s8 scaled;
        struct perf_counts_values aggr;
        struct perf_counts_values cpu[];
};
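
/*
 * Illustrative sketch, not part of the original header: val/ena/run mirror
 * the perf_event_open() read format when PERF_FORMAT_TOTAL_TIME_ENABLED and
 * PERF_FORMAT_TOTAL_TIME_RUNNING are requested, so a scaled count can be
 * extrapolated when the counter was multiplexed. The helper name below is
 * hypothetical; the real scaling is done inside the read routines in evsel.c.
 */
static inline u64 perf_counts_values__scaled_sketch(const struct perf_counts_values *v)
{
        if (v->run == 0)
                return 0;               /* counter never ran */
        if (v->run >= v->ena)
                return v->val;          /* no multiplexing, raw value is exact */
        /* extrapolate for the time the counter was scheduled out */
        return (u64)((double)v->val * v->ena / v->run + 0.5);
}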

struct perf_evsel;

/*
 * Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there is
 * more than one entry in the evlist.
 */
struct perf_sample_id {
        struct hlist_node node;
        u64 id;
        struct perf_evsel *evsel;
};

struct perf_evsel {
        struct list_head node;
        struct perf_event_attr attr;
        char *filter;
        struct xyarray *fd;
        struct xyarray *id;
        struct perf_counts *counts;
        int idx;
        void *priv;
};
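
/*
 * Illustrative sketch, not part of the original header: fd and id are 2-D
 * xyarrays indexed by (cpu, thread); perf_evsel__alloc_fd() below sizes fd
 * as ncpus x nthreads ints, so a single perf_event file descriptor would be
 * looked up roughly like this. The helper name is hypothetical; the real
 * accessor lives in evsel.c.
 */
static inline int perf_evsel__fd_sketch(struct perf_evsel *evsel, int cpu, int thread)
{
        return *(int *)xyarray__entry(evsel->fd, cpu, thread);
}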

struct cpu_map;
struct thread_map;
struct perf_evlist;

struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx);
void perf_evsel__init(struct perf_evsel *evsel,
                      struct perf_event_attr *attr, int idx);
void perf_evsel__exit(struct perf_evsel *evsel);
void perf_evsel__delete(struct perf_evsel *evsel);

int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
int perf_evlist__alloc_mmap(struct perf_evlist *evlist, int ncpus);
void perf_evsel__free_fd(struct perf_evsel *evsel);
void perf_evsel__free_id(struct perf_evsel *evsel);
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
                             struct cpu_map *cpus, bool group, bool inherit);
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
                                struct thread_map *threads, bool group, bool inherit);
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
                     struct thread_map *threads, bool group, bool inherit);
int perf_evlist__mmap(struct perf_evlist *evlist, struct cpu_map *cpus,
                      struct thread_map *threads, int pages, bool overwrite);
void perf_evlist__munmap(struct perf_evlist *evlist, int ncpus);

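/*
 * Usage sketch, not part of the original header: the expected lifecycle of
 * a single evsel opened per thread, pieced together from the declarations
 * above. Error handling is abbreviated, the read step is left out (the read
 * helpers are defined further down), and the function name is hypothetical.
 */
static inline int perf_evsel__lifecycle_sketch(struct perf_event_attr *attr,
                                               struct thread_map *threads,
                                               int nthreads)
{
        struct perf_evsel *evsel = perf_evsel__new(attr, 0);
        int err = -1;

        if (evsel == NULL)
                return -1;

        if (perf_evsel__alloc_fd(evsel, 1, nthreads) < 0)
                goto out_delete;

        err = perf_evsel__open_per_thread(evsel, threads, false, false);
        if (err == 0)
                perf_evsel__close_fd(evsel, 1, nthreads);

out_delete:
        perf_evsel__delete(evsel);      /* tears down (perf_evsel__exit()) and frees the evsel */
        return err;
}
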
#define perf_evsel__match(evsel, t, c)                  \
        ((evsel)->attr.type == PERF_TYPE_##t &&         \
         (evsel)->attr.config == PERF_COUNT_##c)

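/*
 * Usage sketch, not part of the original header: perf_evsel__match() pastes
 * its t/c arguments onto the PERF_TYPE_/PERF_COUNT_ prefixes, so a check for
 * the software task-clock event reads like this. The helper name is
 * hypothetical.
 */
static inline bool perf_evsel__is_task_clock_sketch(struct perf_evsel *evsel)
{
        return perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK);
}
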
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
                              int cpu, int thread, bool scale);

/**
 * perf_evsel__read_on_cpu - Read out the results on a CPU and thread
 *
 * @evsel: event selector to read the value from
 * @cpu: CPU of interest
 * @thread: thread of interest
 */
static inline int perf_evsel__read_on_cpu(struct perf_evsel *evsel,
                                          int cpu, int thread)
{
        return __perf_evsel__read_on_cpu(evsel, cpu, thread, false);
}

/**
 * perf_evsel__read_on_cpu_scaled - Read out the results on a CPU and thread, scaled
 *
 * @evsel: event selector to read the value from
 * @cpu: CPU of interest
 * @thread: thread of interest
 */
static inline int perf_evsel__read_on_cpu_scaled(struct perf_evsel *evsel,
                                                 int cpu, int thread)
{
        return __perf_evsel__read_on_cpu(evsel, cpu, thread, true);
}

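/*
 * Usage sketch, not part of the original header: reading one value for a
 * given CPU (thread 0) after the evsel has been opened. The result is
 * assumed to land in evsel->counts->cpu[cpu] (struct perf_counts above),
 * which perf_evsel__alloc_counts() sizes. The helper name is hypothetical.
 */
static inline u64 perf_evsel__cpu_val_sketch(struct perf_evsel *evsel, int cpu)
{
        if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0)
                return 0;

        return evsel->counts->cpu[cpu].val;
}
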
int __perf_evsel__read(struct perf_evsel *evsel, int ncpus, int nthreads,
                       bool scale);

/**
 * perf_evsel__read - Read the aggregate results on all CPUs
 *
 * @evsel: event selector to read the value from
 * @ncpus: Number of CPUs to read, starting from CPU 0
 * @nthreads: Number of threads to read, starting from thread 0
 */
static inline int perf_evsel__read(struct perf_evsel *evsel,
                                   int ncpus, int nthreads)
{
        return __perf_evsel__read(evsel, ncpus, nthreads, false);
}

/**
 * perf_evsel__read_scaled - Read the aggregate results on all CPUs, scaled
 *
 * @evsel: event selector to read the value from
 * @ncpus: Number of CPUs to read, starting from CPU 0
 * @nthreads: Number of threads to read, starting from thread 0
 */
static inline int perf_evsel__read_scaled(struct perf_evsel *evsel,
                                          int ncpus, int nthreads)
{
        return __perf_evsel__read(evsel, ncpus, nthreads, true);
}

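/*
 * Usage sketch, not part of the original header: aggregating one counter
 * over all opened cpus/threads. The aggregate is assumed to land in
 * evsel->counts->aggr, with counts->scaled set by the scaled variant (-1 is
 * assumed to mean the counter never ran). The helper name is hypothetical.
 */
static inline u64 perf_evsel__aggr_val_sketch(struct perf_evsel *evsel,
                                              int ncpus, int nthreads)
{
        if (perf_evsel__read_scaled(evsel, ncpus, nthreads) < 0 ||
            evsel->counts->scaled == -1)
                return 0;

        return evsel->counts->aggr.val;
}
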
#endif /* __PERF_EVSEL_H */