tools/perf/util/stat.c
#include <math.h>
#include "stat.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"

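/*
 * Running mean/variance update (Welford's online algorithm): each new value
 * nudges the mean and accumulates M2, the sum of squared differences from the
 * current mean, so stddev_stats() below can derive a variance without keeping
 * all samples around.
 */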
void update_stats(struct stats *stats, u64 val)
{
	double delta;

	stats->n++;
	delta = val - stats->mean;
	stats->mean += delta / stats->n;
	stats->M2 += delta*(val - stats->mean);

	if (val > stats->max)
		stats->max = val;

	if (val < stats->min)
		stats->min = val;
}

double avg_stats(struct stats *stats)
{
	return stats->mean;
}

/*
 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
 *
 *       (\Sum n_i^2) - ((\Sum n_i)^2)/n
 * s^2 = -------------------------------
 *                  n - 1
 *
 * http://en.wikipedia.org/wiki/Stddev
 *
 * The std dev of the mean is related to the std dev by:
 *
 *             s
 * s_mean = -------
 *          sqrt(n)
 *
 */
double stddev_stats(struct stats *stats)
{
	double variance, variance_mean;

	if (stats->n < 2)
		return 0.0;

	variance = stats->M2 / (stats->n - 1);
	variance_mean = variance / stats->n;

	return sqrt(variance_mean);
}
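
/*
 * Sketch of how these helpers compose (illustrative only; nr_runs and
 * run_time_ns are made-up names, init_stats() comes from stat.h):
 *
 *	struct stats walltime;
 *	int run;
 *
 *	init_stats(&walltime);
 *	for (run = 0; run < nr_runs; run++)
 *		update_stats(&walltime, run_time_ns[run]);
 *	printf("%f +- %f ns\n", avg_stats(&walltime), stddev_stats(&walltime));
 */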

double rel_stddev_stats(double stddev, double avg)
{
	double pct = 0.0;

	if (avg)
		pct = 100.0 * stddev/avg;

	return pct;
}

bool __perf_evsel_stat__is(struct perf_evsel *evsel,
			   enum perf_stat_evsel_id id)
{
	struct perf_stat_evsel *ps = evsel->priv;

	return ps->id == id;
}

#define ID(id, name) [PERF_STAT_EVSEL_ID__##id] = #name
static const char *id_str[PERF_STAT_EVSEL_ID__MAX] = {
	ID(NONE,		x),
	ID(CYCLES_IN_TX,	cpu/cycles-t/),
	ID(TRANSACTION_START,	cpu/tx-start/),
	ID(ELISION_START,	cpu/el-start/),
	ID(CYCLES_IN_TX_CP,	cpu/cycles-ct/),
};
#undef ID

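/*
 * Classify the evsel by matching its name against the table above; events
 * that match no entry stay at PERF_STAT_EVSEL_ID__NONE, so that the display
 * code can special-case the transactional memory events.
 */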
void perf_stat_evsel_id_init(struct perf_evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->priv;
	int i;

	/* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */

	for (i = 0; i < PERF_STAT_EVSEL_ID__MAX; i++) {
		if (!strcmp(perf_evsel__name(evsel), id_str[i])) {
			ps->id = i;
			break;
		}
	}
}

static void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
{
	int i;
	struct perf_stat_evsel *ps = evsel->priv;

	for (i = 0; i < 3; i++)
		init_stats(&ps->res_stats[i]);

	perf_stat_evsel_id_init(evsel);
}

static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
{
	evsel->priv = zalloc(sizeof(struct perf_stat_evsel));
	if (evsel->priv == NULL)
		return -ENOMEM;
	perf_evsel__reset_stat_priv(evsel);
	return 0;
}

static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
{
	zfree(&evsel->priv);
}

static int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel,
					     int ncpus, int nthreads)
{
	struct perf_counts *counts;

	counts = perf_counts__new(ncpus, nthreads);
	if (counts)
		evsel->prev_raw_counts = counts;

	return counts ? 0 : -ENOMEM;
}

static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
{
	perf_counts__delete(evsel->prev_raw_counts);
	evsel->prev_raw_counts = NULL;
}

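/*
 * Allocate everything 'perf stat' needs per event: the private
 * perf_stat_evsel, the counts arrays and, when alloc_raw is set,
 * prev_raw_counts so that deltas between successive reads can be computed.
 */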
static int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw)
{
	int ncpus = perf_evsel__nr_cpus(evsel);
	int nthreads = thread_map__nr(evsel->threads);

	if (perf_evsel__alloc_stat_priv(evsel) < 0 ||
	    perf_evsel__alloc_counts(evsel, ncpus, nthreads) < 0 ||
	    (alloc_raw && perf_evsel__alloc_prev_raw_counts(evsel, ncpus, nthreads) < 0))
		return -ENOMEM;

	return 0;
}

int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		if (perf_evsel__alloc_stats(evsel, alloc_raw))
			goto out_free;
	}

	return 0;

out_free:
	perf_evlist__free_stats(evlist);
	return -1;
}

void perf_evlist__free_stats(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		perf_evsel__free_stat_priv(evsel);
		perf_evsel__free_counts(evsel);
		perf_evsel__free_prev_raw_counts(evsel);
	}
}

void perf_evlist__reset_stats(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		perf_evsel__reset_stat_priv(evsel);
		perf_evsel__reset_counts(evsel);
	}
}

static void zero_per_pkg(struct perf_evsel *counter)
{
	if (counter->per_pkg_mask)
		memset(counter->per_pkg_mask, 0, MAX_NR_CPUS);
}

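/*
 * Per-package events (e.g. uncore) should be counted only once per package,
 * not once per CPU. *skip is set when the socket this CPU belongs to already
 * has its bit set in per_pkg_mask, i.e. some other CPU of the same package
 * already contributed a value.
 */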
static int check_per_pkg(struct perf_evsel *counter,
			 struct perf_counts_values *vals, int cpu, bool *skip)
{
	unsigned long *mask = counter->per_pkg_mask;
	struct cpu_map *cpus = perf_evsel__cpus(counter);
	int s;

	*skip = false;

	if (!counter->per_pkg)
		return 0;

	if (cpu_map__empty(cpus))
		return 0;

	if (!mask) {
		mask = zalloc(MAX_NR_CPUS);
		if (!mask)
			return -ENOMEM;

		counter->per_pkg_mask = mask;
	}

	/*
	 * we do not consider an event that has not run as a good
	 * instance to mark a package as used (skip=1). Otherwise
	 * we may run into a situation where the first CPU in a package
	 * is not running anything, yet the second is, and this function
	 * would mark the package as used after the first CPU and would
	 * not read the values from the second CPU.
	 */
	if (!(vals->run && vals->ena))
		return 0;

	s = cpu_map__get_socket(cpus, cpu, NULL);
	if (s < 0)
		return -1;

	*skip = test_and_set_bit(s, mask) == 1;
	return 0;
}

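/*
 * Fold one (cpu, thread) value into the aggregation selected by
 * config->aggr_mode: per-cpu/core/socket/thread values are scaled in place,
 * while AGGR_GLOBAL sums everything into evsel->counts->aggr.
 */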
static int
process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel,
		       int cpu, int thread,
		       struct perf_counts_values *count)
{
	struct perf_counts_values *aggr = &evsel->counts->aggr;
	static struct perf_counts_values zero;
	bool skip = false;

	if (check_per_pkg(evsel, count, cpu, &skip)) {
		pr_err("failed to read per-pkg counter\n");
		return -1;
	}

	if (skip)
		count = &zero;

	switch (config->aggr_mode) {
	case AGGR_THREAD:
	case AGGR_CORE:
	case AGGR_SOCKET:
	case AGGR_NONE:
		if (!evsel->snapshot)
			perf_evsel__compute_deltas(evsel, cpu, thread, count);
		perf_counts_values__scale(count, config->scale, NULL);
		if (config->aggr_mode == AGGR_NONE)
			perf_stat__update_shadow_stats(evsel, count->values, cpu);
		break;
	case AGGR_GLOBAL:
		aggr->val += count->val;
		if (config->scale) {
			aggr->ena += count->ena;
			aggr->run += count->run;
		}
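		/* fall through */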
	case AGGR_UNSET:
	default:
		break;
	}

	return 0;
}

static int process_counter_maps(struct perf_stat_config *config,
				struct perf_evsel *counter)
{
	int nthreads = thread_map__nr(counter->threads);
	int ncpus = perf_evsel__nr_cpus(counter);
	int cpu, thread;

	if (counter->system_wide)
		nthreads = 1;

	for (thread = 0; thread < nthreads; thread++) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			if (process_counter_values(config, counter, cpu, thread,
						   perf_counts(counter->counts, cpu, thread)))
				return -1;
		}
	}

	return 0;
}

int perf_stat_process_counter(struct perf_stat_config *config,
			      struct perf_evsel *counter)
{
	struct perf_counts_values *aggr = &counter->counts->aggr;
	struct perf_stat_evsel *ps = counter->priv;
	u64 *count = counter->counts->aggr.values;
	u64 val;
	int i, ret;

	aggr->val = aggr->ena = aggr->run = 0;

	/*
	 * We calculate the counter's data every interval, and the display
	 * code shows the ps->res_stats average. The stats must be zeroed
	 * in interval mode, otherwise the running average over all previous
	 * intervals would be shown for each interval.
	 */
	if (config->interval)
		init_stats(ps->res_stats);

	if (counter->per_pkg)
		zero_per_pkg(counter);

	ret = process_counter_maps(config, counter);
	if (ret)
		return ret;

	if (config->aggr_mode != AGGR_GLOBAL)
		return 0;

	if (!counter->snapshot)
		perf_evsel__compute_deltas(counter, -1, -1, aggr);
	perf_counts_values__scale(aggr, config->scale, &counter->counts->scaled);

	for (i = 0; i < 3; i++)
		update_stats(&ps->res_stats[i], count[i]);

	if (verbose) {
		fprintf(config->output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			perf_evsel__name(counter), count[0], count[1], count[2]);
	}

	/*
	 * Save the full runtime - to allow normalization during printout:
	 */
	val = counter->scale * *count;
	perf_stat__update_shadow_stats(counter, &val, 0);

	return 0;
}

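/*
 * Used when reading back counter values recorded with 'perf stat record':
 * take the value carried in a synthesized stat event and store it into the
 * matching evsel's counts, as if it had been read locally.
 */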
int perf_event__process_stat_event(struct perf_tool *tool __maybe_unused,
				   union perf_event *event,
				   struct perf_session *session)
{
	struct perf_counts_values count;
	struct stat_event *st = &event->stat;
	struct perf_evsel *counter;

	count.val = st->val;
	count.ena = st->ena;
	count.run = st->run;

	counter = perf_evlist__id2evsel(session->evlist, st->id);
	if (!counter) {
		pr_err("Failed to resolve counter for stat event.\n");
		return -EINVAL;
	}

	*perf_counts(counter->counts, st->cpu, st->thread) = count;
	counter->supported = true;
	return 0;
}

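/*
 * Debug printout helpers for the stat-related events, used when dumping raw
 * events from a perf.data file (e.g. with -D/--dump-raw-trace style output).
 */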
size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp)
{
	struct stat_event *st = (struct stat_event *) event;
	size_t ret;

	ret = fprintf(fp, "\n... id %" PRIu64 ", cpu %d, thread %d\n",
		      st->id, st->cpu, st->thread);
	ret += fprintf(fp, "... value %" PRIu64 ", enabled %" PRIu64 ", running %" PRIu64 "\n",
		       st->val, st->ena, st->run);

	return ret;
}

size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp)
{
	struct stat_round_event *rd = (struct stat_round_event *)event;
	size_t ret;

	ret = fprintf(fp, "\n... time %" PRIu64 ", type %s\n", rd->time,
		      rd->type == PERF_STAT_ROUND_TYPE__FINAL ? "FINAL" : "INTERVAL");

	return ret;
}

size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
{
	struct perf_stat_config sc;
	size_t ret;

	perf_event__read_stat_config(&sc, &event->stat_config);

	ret = fprintf(fp, "\n");
	ret += fprintf(fp, "... aggr_mode %d\n", sc.aggr_mode);
	ret += fprintf(fp, "... scale %d\n", sc.scale);
	ret += fprintf(fp, "... interval %u\n", sc.interval);

	return ret;
}