/* tools/perf/ui/hist.c */
#include <math.h>
#include <linux/compiler.h>

#include "../util/hist.h"
#include "../util/util.h"
#include "../util/sort.h"
#include "../util/evsel.h"

/* hist period print (hpp) functions */

#define hpp__call_print_fn(hpp, fn, fmt, ...) \
({ \
        int __ret = fn(hpp, fmt, ##__VA_ARGS__); \
        advance_hpp(hpp, __ret); \
        __ret; \
})
int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
               hpp_field_fn get_field, hpp_callback_fn callback,
               const char *fmt, hpp_snprint_fn print_fn, bool fmt_percent)
{
        int ret = 0;
        struct hists *hists = he->hists;
        struct perf_evsel *evsel = hists_to_evsel(hists);
        char *buf = hpp->buf;
        size_t size = hpp->size;

        if (callback) {
                ret = callback(hpp, true);
                advance_hpp(hpp, ret);
        }

        if (fmt_percent) {
                double percent = 0.0;

                if (hists->stats.total_period)
                        percent = 100.0 * get_field(he) /
                                  hists->stats.total_period;

                ret += hpp__call_print_fn(hpp, print_fn, fmt, percent);
        } else
                ret += hpp__call_print_fn(hpp, print_fn, fmt, get_field(he));

        if (perf_evsel__is_group_event(evsel)) {
                int prev_idx, idx_delta;
                struct hist_entry *pair;
                int nr_members = evsel->nr_members;

                prev_idx = perf_evsel__group_idx(evsel);

                list_for_each_entry(pair, &he->pairs.head, pairs.node) {
                        u64 period = get_field(pair);
                        u64 total = pair->hists->stats.total_period;

                        if (!total)
                                continue;

                        evsel = hists_to_evsel(pair->hists);
                        idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1;

                        while (idx_delta--) {
                                /*
                                 * zero-fill group members in the middle
                                 * which have no sample
                                 */
                                if (fmt_percent) {
                                        ret += hpp__call_print_fn(hpp, print_fn,
                                                                  fmt, 0.0);
                                } else {
                                        ret += hpp__call_print_fn(hpp, print_fn,
                                                                  fmt, 0ULL);
                                }
                        }

                        if (fmt_percent) {
                                ret += hpp__call_print_fn(hpp, print_fn, fmt,
                                                          100.0 * period / total);
                        } else {
                                ret += hpp__call_print_fn(hpp, print_fn, fmt,
                                                          period);
                        }

                        prev_idx = perf_evsel__group_idx(evsel);
                }

                idx_delta = nr_members - prev_idx - 1;

                while (idx_delta--) {
                        /*
                         * zero-fill trailing group members which have
                         * no sample
                         */
                        if (fmt_percent) {
                                ret += hpp__call_print_fn(hpp, print_fn,
                                                          fmt, 0.0);
                        } else {
                                ret += hpp__call_print_fn(hpp, print_fn,
                                                          fmt, 0ULL);
                        }
                }
        }

        if (callback) {
                int __ret = callback(hpp, false);

                advance_hpp(hpp, __ret);
                ret += __ret;
        }

        /*
         * Restore the original buf and size since that is where the
         * caller expects the result to be saved.
         */
        hpp->buf = buf;
        hpp->size = size;

        return ret;
}
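
/*
 * For an event group (e.g. "{cycles,instructions}", illustrative), the
 * group loop above emits one value per member on the same line, in group
 * index order, zero-filling members that have no matching pair entry:
 *
 *      12.34%   0.00%
 */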

#define __HPP_HEADER_FN(_type, _str, _min_width, _unit_width) \
static int hpp__header_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
                               struct perf_hpp *hpp, \
                               struct perf_evsel *evsel) \
{ \
        int len = _min_width; \
 \
        if (symbol_conf.event_group) \
                len = max(len, evsel->nr_members * _unit_width); \
 \
        return scnprintf(hpp->buf, hpp->size, "%*s", len, _str); \
}

#define __HPP_WIDTH_FN(_type, _min_width, _unit_width) \
static int hpp__width_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
                              struct perf_hpp *hpp __maybe_unused, \
                              struct perf_evsel *evsel) \
{ \
        int len = _min_width; \
 \
        if (symbol_conf.event_group) \
                len = max(len, evsel->nr_members * _unit_width); \
 \
        return len; \
}

static int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
        va_list args;
        ssize_t ssize = hpp->size;
        double percent;
        int ret;

        va_start(args, fmt);
        percent = va_arg(args, double);
        ret = value_color_snprintf(hpp->buf, hpp->size, fmt, percent);
        va_end(args);

        return (ret >= ssize) ? (ssize - 1) : ret;
}

static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
        va_list args;
        ssize_t ssize = hpp->size;
        int ret;

        va_start(args, fmt);
        ret = vsnprintf(hpp->buf, hpp->size, fmt, args);
        va_end(args);

        return (ret >= ssize) ? (ssize - 1) : ret;
}
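
/*
 * Both helpers above clamp their return value to ssize - 1: a
 * vsnprintf()-style call reports the length the output would have had,
 * so on truncation the clamped value matches what actually fits in
 * hpp->buf and keeps advance_hpp() from stepping past the buffer.
 */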

#define __HPP_COLOR_PERCENT_FN(_type, _field) \
static u64 he_get_##_field(struct hist_entry *he) \
{ \
        return he->stat._field; \
} \
 \
static int hpp__color_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
                              struct perf_hpp *hpp, struct hist_entry *he) \
{ \
        return __hpp__fmt(hpp, he, he_get_##_field, NULL, " %6.2f%%", \
                          hpp_color_scnprintf, true); \
}

#define __HPP_ENTRY_PERCENT_FN(_type, _field) \
static int hpp__entry_##_type(struct perf_hpp_fmt *_fmt __maybe_unused, \
                              struct perf_hpp *hpp, struct hist_entry *he) \
{ \
        const char *fmt = symbol_conf.field_sep ? " %.2f" : " %6.2f%%"; \
        return __hpp__fmt(hpp, he, he_get_##_field, NULL, fmt, \
                          hpp_entry_scnprintf, true); \
}

#define __HPP_ENTRY_RAW_FN(_type, _field) \
static u64 he_get_raw_##_field(struct hist_entry *he) \
{ \
        return he->stat._field; \
} \
 \
static int hpp__entry_##_type(struct perf_hpp_fmt *_fmt __maybe_unused, \
                              struct perf_hpp *hpp, struct hist_entry *he) \
{ \
        const char *fmt = symbol_conf.field_sep ? " %"PRIu64 : " %11"PRIu64; \
        return __hpp__fmt(hpp, he, he_get_raw_##_field, NULL, fmt, \
                          hpp_entry_scnprintf, false); \
}

#define HPP_PERCENT_FNS(_type, _str, _field, _min_width, _unit_width) \
__HPP_HEADER_FN(_type, _str, _min_width, _unit_width) \
__HPP_WIDTH_FN(_type, _min_width, _unit_width) \
__HPP_COLOR_PERCENT_FN(_type, _field) \
__HPP_ENTRY_PERCENT_FN(_type, _field)

#define HPP_RAW_FNS(_type, _str, _field, _min_width, _unit_width) \
__HPP_HEADER_FN(_type, _str, _min_width, _unit_width) \
__HPP_WIDTH_FN(_type, _min_width, _unit_width) \
__HPP_ENTRY_RAW_FN(_type, _field)

HPP_PERCENT_FNS(overhead, "Overhead", period, 8, 8)
HPP_PERCENT_FNS(overhead_sys, "sys", period_sys, 8, 8)
HPP_PERCENT_FNS(overhead_us, "usr", period_us, 8, 8)
HPP_PERCENT_FNS(overhead_guest_sys, "guest sys", period_guest_sys, 9, 8)
HPP_PERCENT_FNS(overhead_guest_us, "guest usr", period_guest_us, 9, 8)

HPP_RAW_FNS(samples, "Samples", nr_events, 12, 12)
HPP_RAW_FNS(period, "Period", period, 12, 12)
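
/*
 * For reference, HPP_PERCENT_FNS(overhead, "Overhead", period, 8, 8)
 * above expands into four functions:
 *
 *   hpp__header_overhead() - prints the "Overhead" column header
 *   hpp__width_overhead()  - returns the column width: at least 8, widened
 *                            to 8 * nr_members for event groups
 *   hpp__color_overhead()  - prints he->stat.period as a colored percentage
 *                            of hists->stats.total_period
 *   hpp__entry_overhead()  - prints the same percentage without color
 *
 * HPP_RAW_FNS() generates only header/width/entry functions and prints the
 * raw u64 value instead of a percentage.
 */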

#define HPP__COLOR_PRINT_FNS(_name) \
        { \
                .header = hpp__header_ ## _name, \
                .width  = hpp__width_ ## _name, \
                .color  = hpp__color_ ## _name, \
                .entry  = hpp__entry_ ## _name \
        }

#define HPP__PRINT_FNS(_name) \
        { \
                .header = hpp__header_ ## _name, \
                .width  = hpp__width_ ## _name, \
                .entry  = hpp__entry_ ## _name \
        }

struct perf_hpp_fmt perf_hpp__format[] = {
        HPP__COLOR_PRINT_FNS(overhead),
        HPP__COLOR_PRINT_FNS(overhead_sys),
        HPP__COLOR_PRINT_FNS(overhead_us),
        HPP__COLOR_PRINT_FNS(overhead_guest_sys),
        HPP__COLOR_PRINT_FNS(overhead_guest_us),
        HPP__PRINT_FNS(samples),
        HPP__PRINT_FNS(period)
};

LIST_HEAD(perf_hpp__list);

#undef HPP__COLOR_PRINT_FNS
#undef HPP__PRINT_FNS

#undef HPP_PERCENT_FNS
#undef HPP_RAW_FNS

#undef __HPP_HEADER_FN
#undef __HPP_WIDTH_FN
#undef __HPP_COLOR_PERCENT_FN
#undef __HPP_ENTRY_PERCENT_FN
#undef __HPP_ENTRY_RAW_FN

void perf_hpp__init(void)
{
        perf_hpp__column_enable(PERF_HPP__OVERHEAD);

        if (symbol_conf.show_cpu_utilization) {
                perf_hpp__column_enable(PERF_HPP__OVERHEAD_SYS);
                perf_hpp__column_enable(PERF_HPP__OVERHEAD_US);

                if (perf_guest) {
                        perf_hpp__column_enable(PERF_HPP__OVERHEAD_GUEST_SYS);
                        perf_hpp__column_enable(PERF_HPP__OVERHEAD_GUEST_US);
                }
        }

        if (symbol_conf.show_nr_samples)
                perf_hpp__column_enable(PERF_HPP__SAMPLES);

        if (symbol_conf.show_total_period)
                perf_hpp__column_enable(PERF_HPP__PERIOD);
}

void perf_hpp__column_register(struct perf_hpp_fmt *format)
{
        list_add_tail(&format->list, &perf_hpp__list);
}

void perf_hpp__column_enable(unsigned col)
{
        BUG_ON(col >= PERF_HPP__MAX_INDEX);
        perf_hpp__column_register(&perf_hpp__format[col]);
}
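
/*
 * Columns appear in the output in registration order:
 * perf_hpp__column_register() appends to the tail of perf_hpp__list, so
 * perf_hpp__init() above always places the "Overhead" column first, with
 * the optional columns following in the order they were enabled.
 */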

int hist_entry__sort_snprintf(struct hist_entry *he, char *s, size_t size,
                              struct hists *hists)
{
        const char *sep = symbol_conf.field_sep;
        struct sort_entry *se;
        int ret = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                if (se->elide)
                        continue;

                ret += scnprintf(s + ret, size - ret, "%s", sep ?: "  ");
                ret += se->se_snprintf(he, s + ret, size - ret,
                                       hists__col_len(hists, se->se_width_idx));
        }

        return ret;
}

/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
        struct perf_hpp_fmt *fmt;
        struct sort_entry *se;
        int i = 0, ret = 0;
        struct perf_hpp dummy_hpp;

        perf_hpp__for_each_format(fmt) {
                if (i)
                        ret += 2;

                ret += fmt->width(fmt, &dummy_hpp, hists_to_evsel(hists));
        }

        list_for_each_entry(se, &hist_entry__sort_list, list)
                if (!se->elide)
                        ret += 2 + hists__col_len(hists, se->se_width_idx);

        if (verbose) /* Addr + origin */
                ret += 3 + BITS_PER_LONG / 4;

        return ret;
}
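
/*
 * Width accounting above: each enabled hpp column contributes its
 * fmt->width(), each non-elided sort column adds its column length plus a
 * two-character separator, and in verbose mode 3 + BITS_PER_LONG / 4
 * characters are reserved for the address and origin annotation.
 */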