perf data: Fix releasing event_class
[deliverable/linux.git] / tools / perf / util / data-convert-bt.c
1 /*
2 * CTF writing support via babeltrace.
3 *
4 * Copyright (C) 2014, Jiri Olsa <jolsa@redhat.com>
5 * Copyright (C) 2014, Sebastian Andrzej Siewior <bigeasy@linutronix.de>
6 *
7 * Released under the GPL v2. (and only v2, not any later version)
8 */
9
10 #include <linux/compiler.h>
11 #include <babeltrace/ctf-writer/writer.h>
12 #include <babeltrace/ctf-writer/clock.h>
13 #include <babeltrace/ctf-writer/stream.h>
14 #include <babeltrace/ctf-writer/event.h>
15 #include <babeltrace/ctf-writer/event-types.h>
16 #include <babeltrace/ctf-writer/event-fields.h>
17 #include <babeltrace/ctf-ir/utils.h>
18 #include <babeltrace/ctf/events.h>
19 #include <traceevent/event-parse.h>
20 #include "asm/bug.h"
21 #include "data-convert-bt.h"
22 #include "session.h"
23 #include "util.h"
24 #include "debug.h"
25 #include "tool.h"
26 #include "evlist.h"
27 #include "evsel.h"
28 #include "machine.h"
29
/* Debug printout helpers keyed on the debug_data_convert verbosity level. */
#define pr_N(n, fmt, ...) \
	eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__)

#define pr(fmt, ...)  pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
#define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__)

/* Like pr2() but prefixes the message with a formatted timestamp. */
#define pr_time2(t, fmt, ...) pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__)

/* Per-evsel private data: the CTF event class created for that event. */
struct evsel_priv {
	struct bt_ctf_event_class *event_class;
};

/* Fallback per-cpu stream count when the data file lacks nr_cpus_avail. */
#define MAX_CPUS	4096
43
44 struct ctf_stream {
45 struct bt_ctf_stream *stream;
46 int cpu;
47 u32 count;
48 };
49
50 struct ctf_writer {
51 /* writer primitives */
52 struct bt_ctf_writer *writer;
53 struct ctf_stream **stream;
54 int stream_cnt;
55 struct bt_ctf_stream_class *stream_class;
56 struct bt_ctf_clock *clock;
57
58 /* data types */
59 union {
60 struct {
61 struct bt_ctf_field_type *s64;
62 struct bt_ctf_field_type *u64;
63 struct bt_ctf_field_type *s32;
64 struct bt_ctf_field_type *u32;
65 struct bt_ctf_field_type *string;
66 struct bt_ctf_field_type *u32_hex;
67 struct bt_ctf_field_type *u64_hex;
68 };
69 struct bt_ctf_field_type *array[6];
70 } data;
71 };
72
73 struct convert {
74 struct perf_tool tool;
75 struct ctf_writer writer;
76
77 u64 events_size;
78 u64 events_count;
79
80 /* Ordered events configured queue size. */
81 u64 queue_size;
82 };
83
/*
 * Create an integer field of 'type', store 'val' using the type's
 * signedness and attach it to 'event' under 'name'.
 * Returns 0 on success, non-zero on any babeltrace failure.
 */
static int value_set(struct bt_ctf_field_type *type,
		     struct bt_ctf_event *event,
		     const char *name, u64 val)
{
	struct bt_ctf_field *field;
	bool sign = bt_ctf_field_type_integer_get_signed(type);
	int ret;

	field = bt_ctf_field_create(type);
	if (!field) {
		pr_err("failed to create a field %s\n", name);
		return -1;
	}

	if (sign) {
		ret = bt_ctf_field_signed_integer_set_value(field, val);
		if (ret) {
			pr_err("failed to set field value %s\n", name);
			goto err;
		}
	} else {
		ret = bt_ctf_field_unsigned_integer_set_value(field, val);
		if (ret) {
			pr_err("failed to set field value %s\n", name);
			goto err;
		}
	}

	ret = bt_ctf_event_set_payload(event, name, field);
	if (ret) {
		pr_err("failed to set payload %s\n", name);
		goto err;
	}

	pr2("  SET [%s = %" PRIu64 "]\n", name, val);

	/* Success falls through: the field reference is put either way. */
err:
	bt_ctf_field_put(field);
	return ret;
}
124
/*
 * Generate value_set_<name>() wrappers that pick the matching field
 * type out of cw->data and forward to value_set().
 */
#define __FUNC_VALUE_SET(_name, _val_type)				\
static __maybe_unused int value_set_##_name(struct ctf_writer *cw,	\
			 struct bt_ctf_event *event,			\
			 const char *name,				\
			 _val_type val)					\
{									\
	struct bt_ctf_field_type *type = cw->data._name;		\
	return value_set(type, event, name, (u64) val);			\
}

#define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name)

FUNC_VALUE_SET(s32)
FUNC_VALUE_SET(u32)
FUNC_VALUE_SET(s64)
FUNC_VALUE_SET(u64)
__FUNC_VALUE_SET(u64_hex, u64)
142
143 static struct bt_ctf_field_type*
144 get_tracepoint_field_type(struct ctf_writer *cw, struct format_field *field)
145 {
146 unsigned long flags = field->flags;
147
148 if (flags & FIELD_IS_STRING)
149 return cw->data.string;
150
151 if (!(flags & FIELD_IS_SIGNED)) {
152 /* unsigned long are mostly pointers */
153 if (flags & FIELD_IS_LONG || flags & FIELD_IS_POINTER)
154 return cw->data.u64_hex;
155 }
156
157 if (flags & FIELD_IS_SIGNED) {
158 if (field->size == 8)
159 return cw->data.s64;
160 else
161 return cw->data.s32;
162 }
163
164 if (field->size == 8)
165 return cw->data.u64;
166 else
167 return cw->data.u32;
168 }
169
/*
 * Sign-extend a value read from a 'size'-byte signed field into a
 * 64-bit wide value. 64-bit (and unexpected) sizes pass through.
 */
static unsigned long long adjust_signedness(unsigned long long value_int, int size)
{
	unsigned long long mask;

	/*
	 * mask covers every bit below the sign bit for the given size,
	 * i.e. (1 << (size * 8 - 1)) - 1, spelled out per case.
	 */
	switch (size) {
	case 1:
		mask = 0x7fULL;
		break;
	case 2:
		mask = 0x7fffULL;
		break;
	case 4:
		mask = 0x7fffffffULL;
		break;
	case 8:
		/* Already fills the whole word, nothing to extend. */
		/* Fall through */
	default:
		/* Unexpected size: return the value untouched. */
		return value_int;
	}

	/* Sign bit (and everything above it) clear: non-negative value. */
	if (!(value_int & ~mask))
		return value_int;

	/* Negative: force all bits above the mask to 1. */
	return (value_int & mask) | ~mask;
}
206
207 static int add_tracepoint_field_value(struct ctf_writer *cw,
208 struct bt_ctf_event_class *event_class,
209 struct bt_ctf_event *event,
210 struct perf_sample *sample,
211 struct format_field *fmtf)
212 {
213 struct bt_ctf_field_type *type;
214 struct bt_ctf_field *array_field;
215 struct bt_ctf_field *field;
216 const char *name = fmtf->name;
217 void *data = sample->raw_data;
218 unsigned long flags = fmtf->flags;
219 unsigned int n_items;
220 unsigned int i;
221 unsigned int offset;
222 unsigned int len;
223 int ret;
224
225 name = fmtf->alias;
226 offset = fmtf->offset;
227 len = fmtf->size;
228 if (flags & FIELD_IS_STRING)
229 flags &= ~FIELD_IS_ARRAY;
230
231 if (flags & FIELD_IS_DYNAMIC) {
232 unsigned long long tmp_val;
233
234 tmp_val = pevent_read_number(fmtf->event->pevent,
235 data + offset, len);
236 offset = tmp_val;
237 len = offset >> 16;
238 offset &= 0xffff;
239 }
240
241 if (flags & FIELD_IS_ARRAY) {
242
243 type = bt_ctf_event_class_get_field_by_name(
244 event_class, name);
245 array_field = bt_ctf_field_create(type);
246 bt_ctf_field_type_put(type);
247 if (!array_field) {
248 pr_err("Failed to create array type %s\n", name);
249 return -1;
250 }
251
252 len = fmtf->size / fmtf->arraylen;
253 n_items = fmtf->arraylen;
254 } else {
255 n_items = 1;
256 array_field = NULL;
257 }
258
259 type = get_tracepoint_field_type(cw, fmtf);
260
261 for (i = 0; i < n_items; i++) {
262 if (flags & FIELD_IS_ARRAY)
263 field = bt_ctf_field_array_get_field(array_field, i);
264 else
265 field = bt_ctf_field_create(type);
266
267 if (!field) {
268 pr_err("failed to create a field %s\n", name);
269 return -1;
270 }
271
272 if (flags & FIELD_IS_STRING)
273 ret = bt_ctf_field_string_set_value(field,
274 data + offset + i * len);
275 else {
276 unsigned long long value_int;
277
278 value_int = pevent_read_number(
279 fmtf->event->pevent,
280 data + offset + i * len, len);
281
282 if (!(flags & FIELD_IS_SIGNED))
283 ret = bt_ctf_field_unsigned_integer_set_value(
284 field, value_int);
285 else
286 ret = bt_ctf_field_signed_integer_set_value(
287 field, adjust_signedness(value_int, len));
288 }
289
290 if (ret) {
291 pr_err("failed to set file value %s\n", name);
292 goto err_put_field;
293 }
294 if (!(flags & FIELD_IS_ARRAY)) {
295 ret = bt_ctf_event_set_payload(event, name, field);
296 if (ret) {
297 pr_err("failed to set payload %s\n", name);
298 goto err_put_field;
299 }
300 }
301 bt_ctf_field_put(field);
302 }
303 if (flags & FIELD_IS_ARRAY) {
304 ret = bt_ctf_event_set_payload(event, name, array_field);
305 if (ret) {
306 pr_err("Failed add payload array %s\n", name);
307 return -1;
308 }
309 bt_ctf_field_put(array_field);
310 }
311 return 0;
312
313 err_put_field:
314 bt_ctf_field_put(field);
315 return -1;
316 }
317
318 static int add_tracepoint_fields_values(struct ctf_writer *cw,
319 struct bt_ctf_event_class *event_class,
320 struct bt_ctf_event *event,
321 struct format_field *fields,
322 struct perf_sample *sample)
323 {
324 struct format_field *field;
325 int ret;
326
327 for (field = fields; field; field = field->next) {
328 ret = add_tracepoint_field_value(cw, event_class, event, sample,
329 field);
330 if (ret)
331 return -1;
332 }
333 return 0;
334 }
335
336 static int add_tracepoint_values(struct ctf_writer *cw,
337 struct bt_ctf_event_class *event_class,
338 struct bt_ctf_event *event,
339 struct perf_evsel *evsel,
340 struct perf_sample *sample)
341 {
342 struct format_field *common_fields = evsel->tp_format->format.common_fields;
343 struct format_field *fields = evsel->tp_format->format.fields;
344 int ret;
345
346 ret = add_tracepoint_fields_values(cw, event_class, event,
347 common_fields, sample);
348 if (!ret)
349 ret = add_tracepoint_fields_values(cw, event_class, event,
350 fields, sample);
351
352 return ret;
353 }
354
/*
 * Fill the generic perf_* payload fields that were declared by
 * add_generic_types(), driven by the evsel's sample_type bits.
 * Returns 0 on success, -1 on the first failing field.
 */
static int add_generic_values(struct ctf_writer *cw,
			      struct bt_ctf_event *event,
			      struct perf_evsel *evsel,
			      struct perf_sample *sample)
{
	u64 type = evsel->attr.sample_type;
	int ret;

	/*
	 * missing:
	 *   PERF_SAMPLE_TIME         - not needed as we have it in
	 *                              ctf event header
	 *   PERF_SAMPLE_READ         - TODO
	 *   PERF_SAMPLE_CALLCHAIN    - TODO
	 *   PERF_SAMPLE_RAW          - tracepoint fields are handled separately
	 *   PERF_SAMPLE_BRANCH_STACK - TODO
	 *   PERF_SAMPLE_REGS_USER    - TODO
	 *   PERF_SAMPLE_STACK_USER   - TODO
	 */

	if (type & PERF_SAMPLE_IP) {
		ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_TID) {
		ret = value_set_s32(cw, event, "perf_tid", sample->tid);
		if (ret)
			return -1;

		ret = value_set_s32(cw, event, "perf_pid", sample->pid);
		if (ret)
			return -1;
	}

	if ((type & PERF_SAMPLE_ID) ||
	    (type & PERF_SAMPLE_IDENTIFIER)) {
		ret = value_set_u64(cw, event, "perf_id", sample->id);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		ret = value_set_u64(cw, event, "perf_period", sample->period);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		ret = value_set_u64(cw, event, "perf_weight", sample->weight);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		ret = value_set_u64(cw, event, "perf_data_src",
				sample->data_src);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		ret = value_set_u64(cw, event, "perf_transaction",
				sample->transaction);
		if (ret)
			return -1;
	}

	return 0;
}
432
433 static int ctf_stream__flush(struct ctf_stream *cs)
434 {
435 int err = 0;
436
437 if (cs) {
438 err = bt_ctf_stream_flush(cs->stream);
439 if (err)
440 pr_err("CTF stream %d flush failed\n", cs->cpu);
441
442 pr("Flush stream for cpu %d (%u samples)\n",
443 cs->cpu, cs->count);
444
445 cs->count = 0;
446 }
447
448 return err;
449 }
450
/*
 * Create a CTF stream for 'cpu': instantiate it from the stream class
 * and store the cpu number into the cpu_id field of its packet
 * context. Returns NULL on any failure.
 */
static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu)
{
	struct ctf_stream *cs;
	struct bt_ctf_field *pkt_ctx   = NULL;
	struct bt_ctf_field *cpu_field = NULL;
	struct bt_ctf_stream *stream   = NULL;
	int ret;

	cs = zalloc(sizeof(*cs));
	if (!cs) {
		pr_err("Failed to allocate ctf stream\n");
		return NULL;
	}

	stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class);
	if (!stream) {
		pr_err("Failed to create CTF stream\n");
		goto out;
	}

	pkt_ctx = bt_ctf_stream_get_packet_context(stream);
	if (!pkt_ctx) {
		pr_err("Failed to obtain packet context\n");
		goto out;
	}

	/* Only cpu_field is needed from here on; drop pkt_ctx right away. */
	cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id");
	bt_ctf_field_put(pkt_ctx);
	if (!cpu_field) {
		pr_err("Failed to obtain cpu field\n");
		goto out;
	}

	ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu);
	if (ret) {
		pr_err("Failed to update CPU number\n");
		goto out;
	}

	bt_ctf_field_put(cpu_field);

	cs->cpu    = cpu;
	cs->stream = stream;	/* stream reference handed over to cs */
	return cs;

out:
	if (cpu_field)
		bt_ctf_field_put(cpu_field);
	if (stream)
		bt_ctf_stream_put(stream);

	free(cs);
	return NULL;
}
505
506 static void ctf_stream__delete(struct ctf_stream *cs)
507 {
508 if (cs) {
509 bt_ctf_stream_put(cs->stream);
510 free(cs);
511 }
512 }
513
514 static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu)
515 {
516 struct ctf_stream *cs = cw->stream[cpu];
517
518 if (!cs) {
519 cs = ctf_stream__create(cw, cpu);
520 cw->stream[cpu] = cs;
521 }
522
523 return cs;
524 }
525
526 static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample,
527 struct perf_evsel *evsel)
528 {
529 int cpu = 0;
530
531 if (evsel->attr.sample_type & PERF_SAMPLE_CPU)
532 cpu = sample->cpu;
533
534 if (cpu > cw->stream_cnt) {
535 pr_err("Event was recorded for CPU %d, limit is at %d.\n",
536 cpu, cw->stream_cnt);
537 cpu = 0;
538 }
539
540 return cpu;
541 }
542
543 #define STREAM_FLUSH_COUNT 100000
544
545 /*
546 * Currently we have no other way to determine the
547 * time for the stream flush other than keep track
548 * of the number of events and check it against
549 * threshold.
550 */
551 static bool is_flush_needed(struct ctf_stream *cs)
552 {
553 return cs->count >= STREAM_FLUSH_COUNT;
554 }
555
556 static int process_sample_event(struct perf_tool *tool,
557 union perf_event *_event __maybe_unused,
558 struct perf_sample *sample,
559 struct perf_evsel *evsel,
560 struct machine *machine __maybe_unused)
561 {
562 struct convert *c = container_of(tool, struct convert, tool);
563 struct evsel_priv *priv = evsel->priv;
564 struct ctf_writer *cw = &c->writer;
565 struct ctf_stream *cs;
566 struct bt_ctf_event_class *event_class;
567 struct bt_ctf_event *event;
568 int ret;
569
570 if (WARN_ONCE(!priv, "Failed to setup all events.\n"))
571 return 0;
572
573 event_class = priv->event_class;
574
575 /* update stats */
576 c->events_count++;
577 c->events_size += _event->header.size;
578
579 pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count);
580
581 event = bt_ctf_event_create(event_class);
582 if (!event) {
583 pr_err("Failed to create an CTF event\n");
584 return -1;
585 }
586
587 bt_ctf_clock_set_time(cw->clock, sample->time);
588
589 ret = add_generic_values(cw, event, evsel, sample);
590 if (ret)
591 return -1;
592
593 if (evsel->attr.type == PERF_TYPE_TRACEPOINT) {
594 ret = add_tracepoint_values(cw, event_class, event,
595 evsel, sample);
596 if (ret)
597 return -1;
598 }
599
600 cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel));
601 if (cs) {
602 if (is_flush_needed(cs))
603 ctf_stream__flush(cs);
604
605 cs->count++;
606 bt_ctf_stream_append_event(cs->stream, event);
607 }
608
609 bt_ctf_event_put(event);
610 return cs ? 0 : -1;
611 }
612
/* If dup < 0, prepend a '_'. Otherwise append a _dupl_<dup> suffix. */
static char *change_name(char *name, char *orig_name, int dup)
{
	char *new_name = NULL;
	size_t len;

	if (!name)
		name = orig_name;

	/* Only single digit suffixes fit the reserved "_dupl_X" space. */
	if (dup >= 10)
		goto out;

	/*
	 * Add '_' prefix to potential keyword. According to
	 * Mathieu Desnoyers (https://lkml.org/lkml/2015/1/23/652),
	 * further CTF spec updating may require us to use '$'.
	 */
	len = (dup < 0) ? strlen(name) + sizeof("_")
			: strlen(orig_name) + sizeof("_dupl_X");

	new_name = malloc(len);
	if (!new_name)
		goto out;

	if (dup < 0)
		snprintf(new_name, len, "_%s", name);
	else
		snprintf(new_name, len, "%s_dupl_%d", orig_name, dup);

out:
	/* A previously changed name was handed to us; release it. */
	if (name != orig_name)
		free(name);
	return new_name;
}
648
/*
 * Add 'field' to the event class under a CTF-safe name: keywords get
 * a '_' prefix, duplicate names get a _dupl_<n> suffix. The chosen
 * name is cached in field->alias and reused when filling values.
 */
static int event_class_add_field(struct bt_ctf_event_class *event_class,
		struct bt_ctf_field_type *type,
		struct format_field *field)
{
	struct bt_ctf_field_type *t = NULL;
	char *name;
	int dup = 1;
	int ret;

	/* alias was already assigned */
	if (field->alias != field->name)
		return bt_ctf_event_class_add_field(event_class, type,
				(char *)field->alias);

	name = field->name;

	/* If 'name' is a keyword, add prefix. */
	if (bt_ctf_validate_identifier(name))
		name = change_name(name, field->name, -1);

	if (!name) {
		pr_err("Failed to fix invalid identifier.");
		return -1;
	}
	/* Bump the suffix until the name is unique within this class. */
	while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) {
		bt_ctf_field_type_put(t);
		name = change_name(name, field->name, dup++);
		if (!name) {
			pr_err("Failed to create dup name for '%s'\n", field->name);
			return -1;
		}
	}

	/*
	 * NOTE(review): if add_field fails here, a 'name' allocated by
	 * change_name() is leaked (it is only stored in field->alias on
	 * success).
	 */
	ret = bt_ctf_event_class_add_field(event_class, type, name);
	if (!ret)
		field->alias = name;

	return ret;
}
688
689 static int add_tracepoint_fields_types(struct ctf_writer *cw,
690 struct format_field *fields,
691 struct bt_ctf_event_class *event_class)
692 {
693 struct format_field *field;
694 int ret;
695
696 for (field = fields; field; field = field->next) {
697 struct bt_ctf_field_type *type;
698 unsigned long flags = field->flags;
699
700 pr2(" field '%s'\n", field->name);
701
702 type = get_tracepoint_field_type(cw, field);
703 if (!type)
704 return -1;
705
706 /*
707 * A string is an array of chars. For this we use the string
708 * type and don't care that it is an array. What we don't
709 * support is an array of strings.
710 */
711 if (flags & FIELD_IS_STRING)
712 flags &= ~FIELD_IS_ARRAY;
713
714 if (flags & FIELD_IS_ARRAY)
715 type = bt_ctf_field_type_array_create(type, field->arraylen);
716
717 ret = event_class_add_field(event_class, type, field);
718
719 if (flags & FIELD_IS_ARRAY)
720 bt_ctf_field_type_put(type);
721
722 if (ret) {
723 pr_err("Failed to add field '%s': %d\n",
724 field->name, ret);
725 return -1;
726 }
727 }
728
729 return 0;
730 }
731
732 static int add_tracepoint_types(struct ctf_writer *cw,
733 struct perf_evsel *evsel,
734 struct bt_ctf_event_class *class)
735 {
736 struct format_field *common_fields = evsel->tp_format->format.common_fields;
737 struct format_field *fields = evsel->tp_format->format.fields;
738 int ret;
739
740 ret = add_tracepoint_fields_types(cw, common_fields, class);
741 if (!ret)
742 ret = add_tracepoint_fields_types(cw, fields, class);
743
744 return ret;
745 }
746
/*
 * Declare the generic perf_* fields on 'event_class', driven by the
 * evsel's sample_type bits; add_generic_values() fills them later and
 * must stay in sync with this list.
 */
static int add_generic_types(struct ctf_writer *cw, struct perf_evsel *evsel,
			     struct bt_ctf_event_class *event_class)
{
	u64 type = evsel->attr.sample_type;

	/*
	 * missing:
	 *   PERF_SAMPLE_TIME         - not needed as we have it in
	 *                              ctf event header
	 *   PERF_SAMPLE_READ         - TODO
	 *   PERF_SAMPLE_CALLCHAIN    - TODO
	 *   PERF_SAMPLE_RAW          - tracepoint fields are handled separately
	 *   PERF_SAMPLE_BRANCH_STACK - TODO
	 *   PERF_SAMPLE_REGS_USER    - TODO
	 *   PERF_SAMPLE_STACK_USER   - TODO
	 */

#define ADD_FIELD(cl, t, n)						\
	do {								\
		pr2("  field '%s'\n", n);				\
		if (bt_ctf_event_class_add_field(cl, t, n)) {		\
			pr_err("Failed to add field '%s';\n", n);	\
			return -1;					\
		}							\
	} while (0)

	if (type & PERF_SAMPLE_IP)
		ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip");

	if (type & PERF_SAMPLE_TID) {
		ADD_FIELD(event_class, cw->data.s32, "perf_tid");
		ADD_FIELD(event_class, cw->data.s32, "perf_pid");
	}

	if ((type & PERF_SAMPLE_ID) ||
	    (type & PERF_SAMPLE_IDENTIFIER))
		ADD_FIELD(event_class, cw->data.u64, "perf_id");

	if (type & PERF_SAMPLE_STREAM_ID)
		ADD_FIELD(event_class, cw->data.u64, "perf_stream_id");

	if (type & PERF_SAMPLE_PERIOD)
		ADD_FIELD(event_class, cw->data.u64, "perf_period");

	if (type & PERF_SAMPLE_WEIGHT)
		ADD_FIELD(event_class, cw->data.u64, "perf_weight");

	if (type & PERF_SAMPLE_DATA_SRC)
		ADD_FIELD(event_class, cw->data.u64, "perf_data_src");

	if (type & PERF_SAMPLE_TRANSACTION)
		ADD_FIELD(event_class, cw->data.u64, "perf_transaction");

#undef ADD_FIELD
	return 0;
}
803
/*
 * Create a CTF event class for 'evsel', declare its fields and
 * register it with the stream class. Our reference on the class is
 * stashed in evsel->priv and released in cleanup_events().
 */
static int add_event(struct ctf_writer *cw, struct perf_evsel *evsel)
{
	struct bt_ctf_event_class *event_class;
	struct evsel_priv *priv;
	const char *name = perf_evsel__name(evsel);
	int ret;

	pr("Adding event '%s' (type %d)\n", name, evsel->attr.type);

	event_class = bt_ctf_event_class_create(name);
	if (!event_class)
		return -1;

	ret = add_generic_types(cw, evsel, event_class);
	if (ret)
		goto err;

	if (evsel->attr.type == PERF_TYPE_TRACEPOINT) {
		ret = add_tracepoint_types(cw, evsel, event_class);
		if (ret)
			goto err;
	}

	ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);
	if (ret) {
		pr("Failed to add event class into stream.\n");
		goto err;
	}

	priv = malloc(sizeof(*priv));
	if (!priv)
		goto err;

	priv->event_class = event_class;
	evsel->priv       = priv;
	return 0;

err:
	/*
	 * NOTE(review): on the malloc failure path the class was already
	 * added to the stream class; presumably the stream class took its
	 * own reference so putting ours is still correct — confirm against
	 * babeltrace refcounting semantics.
	 */
	bt_ctf_event_class_put(event_class);
	pr_err("Failed to add event '%s'.\n", name);
	return -1;
}
846
847 static int setup_events(struct ctf_writer *cw, struct perf_session *session)
848 {
849 struct perf_evlist *evlist = session->evlist;
850 struct perf_evsel *evsel;
851 int ret;
852
853 evlist__for_each(evlist, evsel) {
854 ret = add_event(cw, evsel);
855 if (ret)
856 return ret;
857 }
858 return 0;
859 }
860
861 static void cleanup_events(struct perf_session *session)
862 {
863 struct perf_evlist *evlist = session->evlist;
864 struct perf_evsel *evsel;
865
866 evlist__for_each(evlist, evsel) {
867 struct evsel_priv *priv;
868
869 priv = evsel->priv;
870 bt_ctf_event_class_put(priv->event_class);
871 zfree(&evsel->priv);
872 }
873
874 perf_evlist__delete(evlist);
875 session->evlist = NULL;
876 }
877
878 static int setup_streams(struct ctf_writer *cw, struct perf_session *session)
879 {
880 struct ctf_stream **stream;
881 struct perf_header *ph = &session->header;
882 int ncpus;
883
884 /*
885 * Try to get the number of cpus used in the data file,
886 * if not present fallback to the MAX_CPUS.
887 */
888 ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS;
889
890 stream = zalloc(sizeof(*stream) * ncpus);
891 if (!stream) {
892 pr_err("Failed to allocate streams.\n");
893 return -ENOMEM;
894 }
895
896 cw->stream = stream;
897 cw->stream_cnt = ncpus;
898 return 0;
899 }
900
901 static void free_streams(struct ctf_writer *cw)
902 {
903 int cpu;
904
905 for (cpu = 0; cpu < cw->stream_cnt; cpu++)
906 ctf_stream__delete(cw->stream[cpu]);
907
908 free(cw->stream);
909 }
910
/*
 * Copy the recording environment (host, kernel release, arch, ...)
 * from the perf.data header into the CTF trace environment.
 */
static int ctf_writer__setup_env(struct ctf_writer *cw,
				 struct perf_session *session)
{
	struct perf_header *header = &session->header;
	struct bt_ctf_writer *writer = cw->writer;

#define ADD(__n, __v)							\
do {									\
	if (bt_ctf_writer_add_environment_field(writer, __n, __v))	\
		return -1;						\
} while (0)

	ADD("host",    header->env.hostname);
	ADD("sysname", "Linux");
	ADD("release", header->env.os_release);
	ADD("version", header->env.version);
	ADD("machine", header->env.arch);
	ADD("domain", "kernel");
	ADD("tracer_name", "perf");

#undef ADD
	return 0;
}
934
/*
 * Configure the trace clock: 1 GHz frequency, since perf timestamps
 * are in nanoseconds.
 */
static int ctf_writer__setup_clock(struct ctf_writer *cw)
{
	struct bt_ctf_clock *clock = cw->clock;

	bt_ctf_clock_set_description(clock, "perf clock");

#define SET(__n, __v)				\
do {						\
	if (bt_ctf_clock_set_##__n(clock, __v))	\
		return -1;			\
} while (0)

	SET(frequency,   1000000000);
	SET(offset_s,    0);
	SET(offset,      0);
	SET(precision,   10);
	SET(is_absolute, 0);

#undef SET
	return 0;
}
956
957 static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex)
958 {
959 struct bt_ctf_field_type *type;
960
961 type = bt_ctf_field_type_integer_create(size);
962 if (!type)
963 return NULL;
964
965 if (sign &&
966 bt_ctf_field_type_integer_set_signed(type, 1))
967 goto err;
968
969 if (hex &&
970 bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL))
971 goto err;
972
973 pr2("Created type: INTEGER %d-bit %ssigned %s\n",
974 size, sign ? "un" : "", hex ? "hex" : "");
975 return type;
976
977 err:
978 bt_ctf_field_type_put(type);
979 return NULL;
980 }
981
982 static void ctf_writer__cleanup_data(struct ctf_writer *cw)
983 {
984 unsigned int i;
985
986 for (i = 0; i < ARRAY_SIZE(cw->data.array); i++)
987 bt_ctf_field_type_put(cw->data.array[i]);
988 }
989
/*
 * Create all the integer and string field types used by the
 * converter. On any failure everything created so far is released.
 */
static int ctf_writer__init_data(struct ctf_writer *cw)
{
#define CREATE_INT_TYPE(type, size, sign, hex)		\
do {							\
	(type) = create_int_type(size, sign, hex);	\
	if (!(type))					\
		goto err;				\
} while (0)

	CREATE_INT_TYPE(cw->data.s64, 64, true,  false);
	CREATE_INT_TYPE(cw->data.u64, 64, false, false);
	CREATE_INT_TYPE(cw->data.s32, 32, true,  false);
	CREATE_INT_TYPE(cw->data.u32, 32, false, false);
	CREATE_INT_TYPE(cw->data.u32_hex, 32, false, true);
	CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true);

	cw->data.string  = bt_ctf_field_type_string_create();
	if (cw->data.string)
		return 0;

err:
	ctf_writer__cleanup_data(cw);
	pr_err("Failed to create data types.\n");
	return -1;
}
1015
/* Release everything ctf_writer__init() and friends created. */
static void ctf_writer__cleanup(struct ctf_writer *cw)
{
	ctf_writer__cleanup_data(cw);

	bt_ctf_clock_put(cw->clock);
	free_streams(cw);
	bt_ctf_stream_class_put(cw->stream_class);
	bt_ctf_writer_put(cw->writer);

	/* and NULL all the pointers */
	memset(cw, 0, sizeof(*cw));
}
1028
/*
 * Build the full CTF writer: output writer, clock, stream class with
 * a cpu_id packet context field, and the data field types.
 * On any failure every partially created resource is released.
 */
static int ctf_writer__init(struct ctf_writer *cw, const char *path)
{
	struct bt_ctf_writer		*writer;
	struct bt_ctf_stream_class	*stream_class;
	struct bt_ctf_clock		*clock;
	struct bt_ctf_field_type	*pkt_ctx_type;
	int				ret;

	/* CTF writer */
	writer = bt_ctf_writer_create(path);
	if (!writer)
		goto err;

	cw->writer = writer;

	/* CTF clock */
	clock = bt_ctf_clock_create("perf_clock");
	if (!clock) {
		pr("Failed to create CTF clock.\n");
		goto err_cleanup;
	}

	cw->clock = clock;

	if (ctf_writer__setup_clock(cw)) {
		pr("Failed to setup CTF clock.\n");
		goto err_cleanup;
	}

	/* CTF stream class */
	stream_class = bt_ctf_stream_class_create("perf_stream");
	if (!stream_class) {
		pr("Failed to create CTF stream class.\n");
		goto err_cleanup;
	}

	cw->stream_class = stream_class;

	/* CTF clock stream setup */
	if (bt_ctf_stream_class_set_clock(stream_class, clock)) {
		pr("Failed to assign CTF clock to stream class.\n");
		goto err_cleanup;
	}

	if (ctf_writer__init_data(cw))
		goto err_cleanup;

	/* Add cpu_id for packet context */
	pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class);
	if (!pkt_ctx_type)
		goto err_cleanup;

	/* Only needed the context type to extend it; put it right away. */
	ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id");
	bt_ctf_field_type_put(pkt_ctx_type);
	if (ret)
		goto err_cleanup;

	/* CTF clock writer setup */
	if (bt_ctf_writer_add_clock(writer, clock)) {
		pr("Failed to assign CTF clock to writer.\n");
		goto err_cleanup;
	}

	return 0;

err_cleanup:
	ctf_writer__cleanup(cw);
err:
	pr_err("Failed to setup CTF writer.\n");
	return -1;
}
1100
1101 static int ctf_writer__flush_streams(struct ctf_writer *cw)
1102 {
1103 int cpu, ret = 0;
1104
1105 for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++)
1106 ret = ctf_stream__flush(cw->stream[cpu]);
1107
1108 return ret;
1109 }
1110
1111 static int convert__config(const char *var, const char *value, void *cb)
1112 {
1113 struct convert *c = cb;
1114
1115 if (!strcmp(var, "convert.queue-size")) {
1116 c->queue_size = perf_config_u64(var, value);
1117 return 0;
1118 }
1119
1120 return perf_default_config(var, value, cb);
1121 }
1122
/*
 * Entry point: convert the perf.data file at 'input' into a CTF trace
 * written under 'path'. 'force' is passed through to the perf data
 * file open. Returns 0 on success, negative on error.
 */
int bt_convert__perf2ctf(const char *input, const char *path, bool force)
{
	struct perf_session *session;
	struct perf_data_file file = {
		.path = input,
		.mode = PERF_DATA_MODE_READ,
		.force = force,
	};
	struct convert c = {
		.tool = {
			.sample          = process_sample_event,
			.mmap            = perf_event__process_mmap,
			.mmap2           = perf_event__process_mmap2,
			.comm            = perf_event__process_comm,
			.exit            = perf_event__process_exit,
			.fork            = perf_event__process_fork,
			.lost            = perf_event__process_lost,
			.tracing_data    = perf_event__process_tracing_data,
			.build_id        = perf_event__process_build_id,
			/* CTF needs samples delivered in time order. */
			.ordered_events  = true,
			.ordering_requires_timestamps = true,
		},
	};
	struct ctf_writer *cw = &c.writer;
	int err = -1;

	perf_config(convert__config, &c);

	/* CTF writer */
	if (ctf_writer__init(cw, path))
		return -1;

	/* perf.data session */
	session = perf_session__new(&file, 0, &c.tool);
	if (!session)
		goto free_writer;

	/* Optional user override of the ordered events queue size. */
	if (c.queue_size) {
		ordered_events__set_alloc_size(&session->ordered_events,
					       c.queue_size);
	}

	/* CTF writer env/clock setup */
	if (ctf_writer__setup_env(cw, session))
		goto free_session;

	/* CTF events setup */
	if (setup_events(cw, session))
		goto free_session;

	if (setup_streams(cw, session))
		goto free_session;

	err = perf_session__process_events(session);
	if (!err)
		err = ctf_writer__flush_streams(cw);
	else
		pr_err("Error during conversion.\n");

	fprintf(stderr,
		"[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
		file.path, path);

	fprintf(stderr,
		"[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples) ]\n",
		(double) c.events_size / 1024.0 / 1024.0,
		c.events_count);

	cleanup_events(session);
	perf_session__delete(session);
	ctf_writer__cleanup(cw);

	return err;

free_session:
	perf_session__delete(session);
free_writer:
	ctf_writer__cleanup(cw);
	pr_err("Error during conversion setup.\n");
	return err;
}
This page took 0.079092 seconds and 5 git commands to generate.