1 /*
2 * auxtrace.h: AUX area trace support
3 * Copyright (c) 2013-2015, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 */
15
16 #ifndef __PERF_AUXTRACE_H
17 #define __PERF_AUXTRACE_H
18
19 #include <sys/types.h>
20 #include <stdbool.h>
21 #include <stddef.h>
22 #include <linux/list.h>
23 #include <linux/perf_event.h>
24 #include <linux/types.h>
25
26 #include "../perf.h"
27 #include "event.h"
28 #include "session.h"
29 #include "debug.h"
30
31 union perf_event;
32 struct perf_session;
33 struct perf_evlist;
34 struct perf_tool;
35 struct option;
36 struct record_opts;
37 struct auxtrace_info_event;
38 struct events_stats;
39
/* Hardware AUX area decoder types known to perf. */
enum auxtrace_type {
	PERF_AUXTRACE_UNKNOWN,
	PERF_AUXTRACE_INTEL_PT,
	PERF_AUXTRACE_INTEL_BTS,
};
45
/* Unit in which the 'instructions' synthesis period is expressed. */
enum itrace_period_type {
	PERF_ITRACE_PERIOD_INSTRUCTIONS,
	PERF_ITRACE_PERIOD_TICKS,
	PERF_ITRACE_PERIOD_NANOSECS,
};
51
/**
 * struct itrace_synth_opts - AUX area tracing synthesis options.
 * @set: indicates whether or not options have been set
 * @inject: indicates the event (not just the sample) must be fully synthesized
 *          because 'perf inject' will write it out
 * @instructions: whether to synthesize 'instructions' events
 * @branches: whether to synthesize 'branches' events
 * @transactions: whether to synthesize events for transactions
 * @errors: whether to synthesize decoder error events
 * @dont_decode: whether to skip decoding entirely
 * @log: write a decoding log
 * @calls: limit branch samples to calls (can be combined with @returns)
 * @returns: limit branch samples to returns (can be combined with @calls)
 * @callchain: add callchain to 'instructions' events
 * @last_branch: add branch context to 'instructions' events
 * @callchain_sz: maximum callchain size
 * @last_branch_sz: branch context size
 * @period: 'instructions' events period
 * @period_type: 'instructions' events period type
 * @initial_skip: skip N events at the beginning.
 */
struct itrace_synth_opts {
	bool			set;
	bool			inject;
	bool			instructions;
	bool			branches;
	bool			transactions;
	bool			errors;
	bool			dont_decode;
	bool			log;
	bool			calls;
	bool			returns;
	bool			callchain;
	bool			last_branch;
	unsigned int		callchain_sz;
	unsigned int		last_branch_sz;
	unsigned long long	period;
	enum itrace_period_type	period_type;
	unsigned long		initial_skip;
};
92
/**
 * struct auxtrace_index_entry - indexes an AUX area tracing event within a
 *                               perf.data file.
 * @file_offset: offset within the perf.data file
 * @sz: size of the event
 */
struct auxtrace_index_entry {
	u64			file_offset;
	u64			sz;
};
103
/* Number of index entries per auxtrace_index array; arrays are chained. */
#define PERF_AUXTRACE_INDEX_ENTRY_COUNT 256

/**
 * struct auxtrace_index - index of AUX area tracing events within a perf.data
 *                         file.
 * @list: linking a number of arrays of entries
 * @nr: number of entries
 * @entries: array of entries
 */
struct auxtrace_index {
	struct list_head	list;
	size_t			nr;
	struct auxtrace_index_entry entries[PERF_AUXTRACE_INDEX_ENTRY_COUNT];
};
118
/**
 * struct auxtrace - session callbacks to allow AUX area data decoding.
 * @process_event: lets the decoder see all session events
 * @process_auxtrace_event: lets the decoder see AUX area tracing events
 * @flush_events: process any remaining data
 * @free_events: free resources associated with event processing
 * @free: free resources associated with the session
 */
struct auxtrace {
	int (*process_event)(struct perf_session *session,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct perf_tool *tool);
	int (*process_auxtrace_event)(struct perf_session *session,
				      union perf_event *event,
				      struct perf_tool *tool);
	int (*flush_events)(struct perf_session *session,
			    struct perf_tool *tool);
	void (*free_events)(struct perf_session *session);
	void (*free)(struct perf_session *session);
};
139
/**
 * struct auxtrace_buffer - a buffer containing AUX area tracing data.
 * @list: buffers are queued in a list held by struct auxtrace_queue
 * @size: size of the buffer in bytes
 * @pid: in per-thread mode, the pid this buffer is associated with
 * @tid: in per-thread mode, the tid this buffer is associated with
 * @cpu: in per-cpu mode, the cpu this buffer is associated with
 * @data: actual buffer data (can be null if the data has not been loaded)
 * @data_offset: file offset at which the buffer can be read
 * @mmap_addr: mmap address at which the buffer can be read
 * @mmap_size: size of the mmap at @mmap_addr
 * @data_needs_freeing: @data was malloc'd so free it when it is no longer
 *                      needed
 * @consecutive: the original data was split up and this buffer is consecutive
 *               to the previous buffer
 * @offset: offset as determined by aux_head / aux_tail members of struct
 *          perf_event_mmap_page
 * @reference: an implementation-specific reference determined when the data is
 *             recorded
 * @buffer_nr: used to number each buffer
 * @use_size: implementation actually only uses this number of bytes
 * @use_data: implementation actually only uses data starting at this address
 */
struct auxtrace_buffer {
	struct list_head	list;
	size_t			size;
	pid_t			pid;
	pid_t			tid;
	int			cpu;
	void			*data;
	off_t			data_offset;
	void			*mmap_addr;
	size_t			mmap_size;
	bool			data_needs_freeing;
	bool			consecutive;
	u64			offset;
	u64			reference;
	u64			buffer_nr;
	size_t			use_size;
	void			*use_data;
};
181
/**
 * struct auxtrace_queue - a queue of AUX area tracing data buffers.
 * @head: head of buffer list
 * @tid: in per-thread mode, the tid this queue is associated with
 * @cpu: in per-cpu mode, the cpu this queue is associated with
 * @set: %true once this queue has been dedicated to a specific thread or cpu
 * @priv: implementation-specific data
 */
struct auxtrace_queue {
	struct list_head	head;
	pid_t			tid;
	int			cpu;
	bool			set;
	void			*priv;
};
197
/**
 * struct auxtrace_queues - an array of AUX area tracing queues.
 * @queue_array: array of queues
 * @nr_queues: number of queues
 * @new_data: set whenever new data is queued
 * @populated: queues have been fully populated using the auxtrace_index
 * @next_buffer_nr: used to number each buffer
 */
struct auxtrace_queues {
	struct auxtrace_queue	*queue_array;
	unsigned int		nr_queues;
	bool			new_data;
	bool			populated;
	u64			next_buffer_nr;
};
213
/**
 * struct auxtrace_heap_item - element of struct auxtrace_heap.
 * @queue_nr: queue number
 * @ordinal: value used for sorting (lowest ordinal is top of the heap) expected
 *           to be a timestamp
 */
struct auxtrace_heap_item {
	unsigned int		queue_nr;
	u64			ordinal;
};
224
/**
 * struct auxtrace_heap - a heap suitable for sorting AUX area tracing queues.
 * @heap_array: the heap
 * @heap_cnt: the number of elements in the heap
 * @heap_sz: maximum number of elements (grows as needed)
 */
struct auxtrace_heap {
	struct auxtrace_heap_item	*heap_array;
	unsigned int		heap_cnt;
	unsigned int		heap_sz;
};
236
/**
 * struct auxtrace_mmap - records an mmap of the auxtrace buffer.
 * @base: address of mapped area
 * @userpg: pointer to buffer's perf_event_mmap_page
 * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
 * @len: size of mapped area
 * @prev: previous aux_head
 * @idx: index of this mmap
 * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
 *       mmap) otherwise %0
 * @cpu: cpu number for a per-cpu mmap otherwise %-1
 */
struct auxtrace_mmap {
	void		*base;
	void		*userpg;
	size_t		mask;
	size_t		len;
	u64		prev;
	int		idx;
	pid_t		tid;
	int		cpu;
};
259
/**
 * struct auxtrace_mmap_params - parameters to set up struct auxtrace_mmap.
 * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
 * @offset: file offset of mapped area
 * @len: size of mapped area
 * @prot: mmap memory protection
 * @idx: index of this mmap
 * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
 *       mmap) otherwise %0
 * @cpu: cpu number for a per-cpu mmap otherwise %-1
 */
struct auxtrace_mmap_params {
	size_t		mask;
	off_t		offset;
	size_t		len;
	int		prot;
	int		idx;
	pid_t		tid;
	int		cpu;
};
280
/**
 * struct auxtrace_record - callbacks for recording AUX area data.
 * @recording_options: validate and process recording options
 * @info_priv_size: return the size of the private data in auxtrace_info_event
 * @info_fill: fill-in the private data in auxtrace_info_event
 * @free: free this auxtrace record structure
 * @snapshot_start: starting a snapshot
 * @snapshot_finish: finishing a snapshot
 * @find_snapshot: find data to snapshot within auxtrace mmap
 * @parse_snapshot_options: parse snapshot options
 * @reference: provide a 64-bit reference number for auxtrace_event
 * @read_finish: called after reading from an auxtrace mmap
 * @alignment: alignment (if any) for AUX area data
 */
struct auxtrace_record {
	int (*recording_options)(struct auxtrace_record *itr,
				 struct perf_evlist *evlist,
				 struct record_opts *opts);
	size_t (*info_priv_size)(struct auxtrace_record *itr,
				 struct perf_evlist *evlist);
	int (*info_fill)(struct auxtrace_record *itr,
			 struct perf_session *session,
			 struct auxtrace_info_event *auxtrace_info,
			 size_t priv_size);
	void (*free)(struct auxtrace_record *itr);
	int (*snapshot_start)(struct auxtrace_record *itr);
	int (*snapshot_finish)(struct auxtrace_record *itr);
	int (*find_snapshot)(struct auxtrace_record *itr, int idx,
			     struct auxtrace_mmap *mm, unsigned char *data,
			     u64 *head, u64 *old);
	int (*parse_snapshot_options)(struct auxtrace_record *itr,
				      struct record_opts *opts,
				      const char *str);
	u64 (*reference)(struct auxtrace_record *itr);
	int (*read_finish)(struct auxtrace_record *itr, int idx);
	unsigned int alignment;
};
317
318 #ifdef HAVE_AUXTRACE_SUPPORT
319
/*
 * In snapshot mode the mmapped page is read-only which makes using
 * __sync_val_compare_and_swap() problematic. However, snapshot mode expects
 * the buffer is not updated while the snapshot is made (e.g. Intel PT disables
 * the event) so there is not a race anyway.
 */
static inline u64 auxtrace_mmap__read_snapshot_head(struct auxtrace_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->userpg;
	u64 head = ACCESS_ONCE(pc->aux_head);

	/* Ensure all reads are done after we read the head */
	rmb();
	return head;
}
335
/* Read the kernel-updated aux_head from the perf_event_mmap_page. */
static inline u64 auxtrace_mmap__read_head(struct auxtrace_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->userpg;
#if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	u64 head = ACCESS_ONCE(pc->aux_head);
#else
	/*
	 * On 32-bit, a plain load of the 64-bit aux_head could tear; a
	 * compare-and-swap with old == new (0, 0) is used purely as an
	 * atomic 64-bit read.
	 */
	u64 head = __sync_val_compare_and_swap(&pc->aux_head, 0, 0);
#endif

	/* Ensure all reads are done after we read the head */
	rmb();
	return head;
}
349
/* Publish the consumer position (aux_tail) back to the kernel. */
static inline void auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail)
{
	struct perf_event_mmap_page *pc = mm->userpg;
#if BITS_PER_LONG != 64 && defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	u64 old_tail;
#endif

	/* Ensure all reads are done before we write the tail out */
	mb();
#if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	pc->aux_tail = tail;
#else
	/*
	 * On 32-bit, loop a compare-and-swap so the 64-bit aux_tail is
	 * written atomically.
	 */
	do {
		old_tail = __sync_val_compare_and_swap(&pc->aux_tail, 0, 0);
	} while (!__sync_bool_compare_and_swap(&pc->aux_tail, old_tail, tail));
#endif
}
367
/* AUX area mmap setup, teardown and reading (recording side). */
int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd);
void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite);
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct perf_evlist *evlist, int idx,
				   bool per_cpu);

/* Callback invoked for each chunk of AUX data read from the mmap. */
typedef int (*process_auxtrace_t)(struct perf_tool *tool,
				  union perf_event *event, void *data1,
				  size_t len1, void *data2, size_t len2);

int auxtrace_mmap__read(struct auxtrace_mmap *mm, struct auxtrace_record *itr,
			struct perf_tool *tool, process_auxtrace_t fn);

int auxtrace_mmap__read_snapshot(struct auxtrace_mmap *mm,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 size_t snapshot_size);

/* Per-cpu / per-thread queues of AUX data buffers (decoding side). */
int auxtrace_queues__init(struct auxtrace_queues *queues);
int auxtrace_queues__add_event(struct auxtrace_queues *queues,
			       struct perf_session *session,
			       union perf_event *event, off_t data_offset,
			       struct auxtrace_buffer **buffer_ptr);
void auxtrace_queues__free(struct auxtrace_queues *queues);
int auxtrace_queues__process_index(struct auxtrace_queues *queues,
				   struct perf_session *session);
struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
					      struct auxtrace_buffer *buffer);
void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd);
void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer);
void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer);
void auxtrace_buffer__free(struct auxtrace_buffer *buffer);

/* Heap used to order queues by timestamp while decoding. */
int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
		       u64 ordinal);
void auxtrace_heap__pop(struct auxtrace_heap *heap);
void auxtrace_heap__free(struct auxtrace_heap *heap);

/* Entry embedded in implementation-specific cache entries, keyed by @key. */
struct auxtrace_cache_entry {
	struct hlist_node hash;
	u32 key;
};

struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
					   unsigned int limit_percent);
void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache);
void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c);
void auxtrace_cache__free_entry(struct auxtrace_cache *c, void *entry);
int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
			struct auxtrace_cache_entry *entry);
void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key);

/* Recording control: dispatches to the struct auxtrace_record callbacks. */
struct auxtrace_record *auxtrace_record__init(struct perf_evlist *evlist,
					      int *err);

int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
				    struct record_opts *opts,
				    const char *str);
int auxtrace_record__options(struct auxtrace_record *itr,
			     struct perf_evlist *evlist,
			     struct record_opts *opts);
size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
				       struct perf_evlist *evlist);
int auxtrace_record__info_fill(struct auxtrace_record *itr,
			       struct perf_session *session,
			       struct auxtrace_info_event *auxtrace_info,
			       size_t priv_size);
void auxtrace_record__free(struct auxtrace_record *itr);
int auxtrace_record__snapshot_start(struct auxtrace_record *itr);
int auxtrace_record__snapshot_finish(struct auxtrace_record *itr);
int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
				   struct auxtrace_mmap *mm,
				   unsigned char *data, u64 *head, u64 *old);
u64 auxtrace_record__reference(struct auxtrace_record *itr);

/* perf.data AUX index handling. */
int auxtrace_index__auxtrace_event(struct list_head *head, union perf_event *event,
				   off_t file_offset);
int auxtrace_index__write(int fd, struct list_head *head);
int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
			    bool needs_swap);
void auxtrace_index__free(struct list_head *head);

void auxtrace_synth_error(struct auxtrace_error_event *auxtrace_error, int type,
			  int code, int cpu, pid_t pid, pid_t tid, u64 ip,
			  const char *msg);

/* Session-level event synthesis / processing entry points. */
int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
					 struct perf_tool *tool,
					 struct perf_session *session,
					 perf_event__handler_t process);
int perf_event__process_auxtrace_info(struct perf_tool *tool,
				      union perf_event *event,
				      struct perf_session *session);
s64 perf_event__process_auxtrace(struct perf_tool *tool,
				 union perf_event *event,
				 struct perf_session *session);
int perf_event__process_auxtrace_error(struct perf_tool *tool,
				       union perf_event *event,
				       struct perf_session *session);
int itrace_parse_synth_opts(const struct option *opt, const char *str,
			    int unset);
void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts);

size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp);
void perf_session__auxtrace_error_inc(struct perf_session *session,
				      union perf_event *event);
void events_stats__auxtrace_error_warn(const struct events_stats *stats);
481
482 static inline int auxtrace__process_event(struct perf_session *session,
483 union perf_event *event,
484 struct perf_sample *sample,
485 struct perf_tool *tool)
486 {
487 if (!session->auxtrace)
488 return 0;
489
490 return session->auxtrace->process_event(session, event, sample, tool);
491 }
492
493 static inline int auxtrace__flush_events(struct perf_session *session,
494 struct perf_tool *tool)
495 {
496 if (!session->auxtrace)
497 return 0;
498
499 return session->auxtrace->flush_events(session, tool);
500 }
501
502 static inline void auxtrace__free_events(struct perf_session *session)
503 {
504 if (!session->auxtrace)
505 return;
506
507 return session->auxtrace->free_events(session);
508 }
509
510 static inline void auxtrace__free(struct perf_session *session)
511 {
512 if (!session->auxtrace)
513 return;
514
515 return session->auxtrace->free(session);
516 }
517
#else

/*
 * Stubs used when perf is built without AUX area tracing support
 * (!HAVE_AUXTRACE_SUPPORT): recording setup quietly succeeds as a no-op,
 * while operations that genuinely need AUX support fail with -EINVAL
 * and, for user-driven options, print an error.
 */

static inline struct auxtrace_record *
auxtrace_record__init(struct perf_evlist *evlist __maybe_unused,
		      int *err)
{
	/* Not an error: recording simply proceeds without AUX tracing */
	*err = 0;
	return NULL;
}

static inline
void auxtrace_record__free(struct auxtrace_record *itr __maybe_unused)
{
}

static inline int
perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr __maybe_unused,
				     struct perf_tool *tool __maybe_unused,
				     struct perf_session *session __maybe_unused,
				     perf_event__handler_t process __maybe_unused)
{
	return -EINVAL;
}

static inline
int auxtrace_record__options(struct auxtrace_record *itr __maybe_unused,
			     struct perf_evlist *evlist __maybe_unused,
			     struct record_opts *opts __maybe_unused)
{
	return 0;
}

/* Null handlers: callers test these before use */
#define perf_event__process_auxtrace_info 0
#define perf_event__process_auxtrace 0
#define perf_event__process_auxtrace_error 0

static inline
void perf_session__auxtrace_error_inc(struct perf_session *session
				      __maybe_unused,
				      union perf_event *event
				      __maybe_unused)
{
}

static inline
void events_stats__auxtrace_error_warn(const struct events_stats *stats
				       __maybe_unused)
{
}

static inline
int itrace_parse_synth_opts(const struct option *opt __maybe_unused,
			    const char *str __maybe_unused,
			    int unset __maybe_unused)
{
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
int auxtrace_parse_snapshot_options(struct auxtrace_record *itr __maybe_unused,
				    struct record_opts *opts __maybe_unused,
				    const char *str)
{
	/* No snapshot option given: nothing to reject */
	if (!str)
		return 0;
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
int auxtrace__process_event(struct perf_session *session __maybe_unused,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample __maybe_unused,
			    struct perf_tool *tool __maybe_unused)
{
	return 0;
}

static inline
int auxtrace__flush_events(struct perf_session *session __maybe_unused,
			   struct perf_tool *tool __maybe_unused)
{
	return 0;
}

static inline
void auxtrace__free_events(struct perf_session *session __maybe_unused)
{
}

static inline
void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache __maybe_unused)
{
}

static inline
void auxtrace__free(struct perf_session *session __maybe_unused)
{
}

static inline
int auxtrace_index__write(int fd __maybe_unused,
			  struct list_head *head __maybe_unused)
{
	return -EINVAL;
}

static inline
int auxtrace_index__process(int fd __maybe_unused,
			    u64 size __maybe_unused,
			    struct perf_session *session __maybe_unused,
			    bool needs_swap __maybe_unused)
{
	return -EINVAL;
}

static inline
void auxtrace_index__free(struct list_head *head __maybe_unused)
{
}

/* The auxtrace mmap API is provided in both configurations */
int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd);
void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite);
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct perf_evlist *evlist, int idx,
				   bool per_cpu);

#endif
653
654 #endif