/* probes/lttng-events.h (lttng-modules.git) */
1 #include <lttng.h>
2 #include <lttng-types.h>
3
4 /*
5 * Macros mapping tp_assign(), tp_memcpy() and tp_strcpy() to the
6 * corresponding ring buffer write operations.
7 */
8 #undef tp_assign
9 #define tp_assign(dest, src) \
10 lib_ring_buffer_align_ctx(config, &ctx, sizeof(src)); \
11 lib_ring_buffer_write(config, &ctx, &src, sizeof(src));
12
13 #undef tp_memcpy
14 #define tp_memcpy(dest, src, len) \
15 lib_ring_buffer_align_ctx(config, &ctx, sizeof(*(src))); \
16 lib_ring_buffer_write(config, &ctx, &src, len);
17
18 /* TODO */
19 #undef tp_strcpy
20 #define tp_strcpy(dest, src) __assign_str(dest, src);
21
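/*
 * Illustrative sketch only (not generated by this header): assuming a probe
 * body where "config" and "ctx" are in scope, a TP_fast_assign() block such as
 *
 *	TP_fast_assign(
 *		tp_assign(field, value)
 *		tp_memcpy(array, src_buf, 16)
 *	)
 *
 * would expand, per the macros above, into aligned ring buffer writes:
 *
 *	lib_ring_buffer_align_ctx(config, &ctx, sizeof(value));
 *	lib_ring_buffer_write(config, &ctx, &value, sizeof(value));
 *	lib_ring_buffer_align_ctx(config, &ctx, sizeof(*(src_buf)));
 *	lib_ring_buffer_write(config, &ctx, &src_buf, 16);
 *
 * "field", "value", "array" and "src_buf" are hypothetical names; note that
 * tp_assign() and tp_memcpy() do not currently use their "dest" argument.
 */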
22 struct lttng_event_field {
23 const char *name;
24 const struct lttng_type type;
25 };
26
27 struct lttng_event_desc {
28 const struct lttng_event_field *fields;
29 };
30
31 /*
32 * Stage 1 of the trace events.
33 *
34 * Create the event field type metadata section.
35 * Each event produces an array of fields.
36 */
37
38 /*
39 * DECLARE_EVENT_CLASS can be used to add a generic function
40 * handler for events. That is, if all events have the same
41 * parameters and just have distinct trace points.
42 * Each tracepoint can be defined with DEFINE_EVENT and that
43 * will map the DECLARE_EVENT_CLASS to the tracepoint.
44 *
45 * TRACE_EVENT is a one to one mapping between tracepoint and template.
46 */
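/*
 * A minimal, purely hypothetical example of the input these macros consume
 * (event name and fields are made up for illustration):
 *
 *	TRACE_EVENT(sample_event,
 *		TP_PROTO(int pid, const char *comm),
 *		TP_ARGS(pid, comm),
 *		TP_STRUCT__entry(
 *			__field(int, pid)
 *			__array(char, comm, 16)
 *		),
 *		TP_fast_assign(
 *			tp_assign(pid, pid)
 *			tp_memcpy(comm, comm, 16)
 *		),
 *		TP_printk("pid=%d comm=%s", __entry->pid, __entry->comm)
 *	)
 *
 * Each stage below redefines the TP_* and field macros to extract a different
 * piece of information from such a declaration.
 */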
47 #undef TRACE_EVENT
48 #define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
49 DECLARE_EVENT_CLASS(name, \
50 PARAMS(proto), \
51 PARAMS(args), \
52 PARAMS(tstruct), \
53 PARAMS(assign), \
54 PARAMS(print)) \
55 DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args))
56
57 /* Named field types must be defined in lttng-types.h */
58
59 #undef __field
60 #define __field(_type, _item) \
61 { .name = #_item, .type = { .atype = atype_integer, .name = #_type} },
62
63 #undef __field_ext
64 #define __field_ext(_type, _item, _filter_type) \
65 { .name = #_item, .type = { .atype = atype_integer, .name = #_type} },
66
67 #undef __array
68 #define __array(_type, _item, _length) \
69 { \
70 .name = #_item, \
71 .type = { \
72 .atype = atype_array, \
73 .name = NULL, \
74 .u.array.elem_type = #_type, \
75 .u.array.length = _length, \
76 }, \
77 },
78
79 #undef __dynamic_array
80 #define __dynamic_array(_type, _item, _length) \
81 { \
82 .name = #_item, \
83 .type = { \
84 .atype = atype_sequence, \
85 .name = NULL, \
86 .u.sequence.elem_type = #_type, \
87 .u.sequence.length_type = "u32", \
88 }, \
89 },
90
91 #undef __string
92 #define __string(_item, _src) \
93 { \
94 .name = #_item, \
95 .type = { \
96 .atype = atype_string, \
97 .name = NULL, \
98 .u.string.encoding = lttng_encode_UTF8, \
99 }, \
100 },
101
102 #undef TP_PROTO
103 #define TP_PROTO(args...)
104
105 #undef TP_ARGS
106 #define TP_ARGS(args...)
107
108 #undef TP_STRUCT__entry
109 #define TP_STRUCT__entry(args...) args /* Only one used in this phase */
110
111 #undef TP_fast_assign
112 #define TP_fast_assign(args...)
113
114 #undef TP_printk
115 #define TP_printk(args...)
116
117 #undef DECLARE_EVENT_CLASS
118 #define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
119 static const struct lttng_event_field __event_fields___##name[] = { \
120 tstruct \
121 };
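/*
 * For the hypothetical sample_event above, this stage would emit roughly the
 * following metadata array (sketch, not actual generated output):
 *
 *	static const struct lttng_event_field __event_fields___sample_event[] = {
 *		{ .name = "pid",  .type = { .atype = atype_integer, .name = "int" } },
 *		{ .name = "comm", .type = { .atype = atype_array, .name = NULL,
 *					    .u.array.elem_type = "char",
 *					    .u.array.length = 16 } },
 *	};
 */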
122
123 #undef DEFINE_EVENT
124 #define DEFINE_EVENT(template, name, proto, args)
125
126 #undef DEFINE_EVENT_PRINT
127 #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
128 DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
129
130 /* Callbacks are meaningless to LTTng. */
131 #undef TRACE_EVENT_FN
132 #define TRACE_EVENT_FN(name, proto, args, tstruct, \
133 assign, print, reg, unreg) \
134 TRACE_EVENT(name, PARAMS(proto), PARAMS(args), \
135 PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \
136
137 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
138
139 /*
140 * Stage 2 of the trace events.
141 *
142 * Create an array of event descriptions.
143 */
144
145 /*
146 * DECLARE_EVENT_CLASS can be used to add a generic function
147 * handler for events. That is, if all events have the same
148 * parameters and just have distinct trace points.
149 * Each tracepoint can be defined with DEFINE_EVENT and that
150 * will map the DECLARE_EVENT_CLASS to the tracepoint.
151 *
152 * TRACE_EVENT is a one to one mapping between tracepoint and template.
153 */
154 #undef TRACE_EVENT
155 #define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
156 DECLARE_EVENT_CLASS(name, \
157 PARAMS(proto), \
158 PARAMS(args), \
159 PARAMS(tstruct), \
160 PARAMS(assign), \
161 PARAMS(print)) \
162 DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args))
163
164 /* Named field types must be defined in lttng-types.h */
165
166 #undef __field
167 #define __field(_type, _item)
168
169 #undef __field_ext
170 #define __field_ext(_type, _item, _filter_type)
171
172 #undef __array
173 #define __array(_type, _item, _length)
174
175 #undef __dynamic_array
176 #define __dynamic_array(_type, _item, _length)
177
178 #undef __string
179 #define __string(_item, _src)
180
181 #undef TP_PROTO
182 #define TP_PROTO(args...)
183
184 #undef TP_ARGS
185 #define TP_ARGS(args...)
186
187 #undef TP_STRUCT__entry
188 #define TP_STRUCT__entry(args...)
189
190 #undef TP_fast_assign
191 #define TP_fast_assign(args...)
192
193 #undef TP_printk
194 #define TP_printk(args...)
195
196 #undef DECLARE_EVENT_CLASS
197 #define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
198 { .fields = __event_fields___##name },
199
200 #undef DEFINE_EVENT
201 #define DEFINE_EVENT(template, name, proto, args)
202
203 #undef DEFINE_EVENT_PRINT
204 #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
205 DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
206
207 /* Callbacks are meaningless to LTTng. */
208 #undef TRACE_EVENT_FN
209 #define TRACE_EVENT_FN(name, proto, args, tstruct, \
210 assign, print, reg, unreg) \
211 TRACE_EVENT(name, PARAMS(proto), PARAMS(args), \
212 PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \
213
214 #define TRACE_EVENT_DESC_1(_system) __event_desc___##_system
215 #define TRACE_EVENT_DESC(_system) TRACE_EVENT_DESC_1(_system)
216
217 static const struct lttng_event_desc TRACE_EVENT_DESC(TRACE_SYSTEM)[] = {
218 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
219 };
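/*
 * With TRACE_SYSTEM set to, say, "sample" and the hypothetical sample_event
 * above, stage 2 would emit approximately:
 *
 *	static const struct lttng_event_desc __event_desc___sample[] = {
 *		{ .fields = __event_fields___sample_event },
 *	};
 *
 * i.e. one descriptor per DECLARE_EVENT_CLASS (DEFINE_EVENT expands to
 * nothing here), each pointing at the field array produced in stage 1.
 */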
220
221 #undef TRACE_EVENT_DESC_1
222 #undef TRACE_EVENT_DESC
223
224 #if 0
225
226 /*
227 * Stage 3 of the trace events.
228 *
229 * Create a static inline function that calculates the event size.
230 */
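/*
 * Sketch of one possible shape for the missing size-calculation helper,
 * modelled on ftrace_get_offsets_<call> further below.  This is purely
 * illustrative, not the actual LTTng implementation, and assumes an
 * alignment helper such as lib_ring_buffer_align() is available:
 *
 *	static inline size_t __event_get_size__sample_event(int pid,
 *							     const char *comm)
 *	{
 *		size_t __event_len = 0;
 *
 *		__event_len += lib_ring_buffer_align(__event_len, sizeof(int));
 *		__event_len += sizeof(int);		<- __field(int, pid)
 *		__event_len += sizeof(char) * 16;	<- __array(char, comm, 16)
 *		return __event_len;
 *	}
 */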
231
232
233
234 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
235
236 /*
237 * Stage 4 of the trace events.
238 *
239 * Create the probe function: call the event size calculation and write the
240 * event data into the buffer.
241 */
242
243
244
245 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
246
247
248
249
250 #include <linux/ftrace_event.h>
251
252 /*
253 * DECLARE_EVENT_CLASS can be used to add a generic function
254 * handler for events. That is, if all events have the same
255 * parameters and just have distinct trace points.
256 * Each tracepoint can be defined with DEFINE_EVENT and that
257 * will map the DECLARE_EVENT_CLASS to the tracepoint.
258 *
259 * TRACE_EVENT is a one to one mapping between tracepoint and template.
260 */
261 #undef TRACE_EVENT
262 #define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
263 DECLARE_EVENT_CLASS(name, \
264 PARAMS(proto), \
265 PARAMS(args), \
266 PARAMS(tstruct), \
267 PARAMS(assign), \
268 PARAMS(print)); \
269 DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));
270
271
272 #undef __field
273 #define __field(type, item) type item;
274
275 #undef __field_ext
276 #define __field_ext(type, item, filter_type) type item;
277
278 #undef __array
279 #define __array(type, item, len) type item[len];
280
281 #undef __dynamic_array
282 #define __dynamic_array(type, item, len) u32 __data_loc_##item;
283
284 #undef __string
285 #define __string(item, src) __dynamic_array(char, item, -1)
286
287 #undef TP_STRUCT__entry
288 #define TP_STRUCT__entry(args...) args
289
290 #undef DECLARE_EVENT_CLASS
291 #define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
292 struct ftrace_raw_##name { \
293 struct trace_entry ent; \
294 tstruct \
295 char __data[0]; \
296 }; \
297 \
298 static struct ftrace_event_class event_class_##name;
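/*
 * For the hypothetical sample_event, this first ftrace stage would produce
 * something along the lines of (sketch only):
 *
 *	struct ftrace_raw_sample_event {
 *		struct trace_entry	ent;
 *		int			pid;
 *		char			comm[16];
 *		char			__data[0];
 *	};
 *
 *	static struct ftrace_event_class event_class_sample_event;
 */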
299
300 #undef DEFINE_EVENT
301 #define DEFINE_EVENT(template, name, proto, args) \
302 static struct ftrace_event_call __used \
303 __attribute__((__aligned__(4))) event_##name
304
305 #undef DEFINE_EVENT_PRINT
306 #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
307 DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
308
309 /* Callbacks are meaningless to ftrace. */
310 #undef TRACE_EVENT_FN
311 #define TRACE_EVENT_FN(name, proto, args, tstruct, \
312 assign, print, reg, unreg) \
313 TRACE_EVENT(name, PARAMS(proto), PARAMS(args), \
314 PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \
315
316 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
317
318
319 /*
320 * Stage 2 of the trace events.
321 *
322 * Create a static inline function that calculates the event size.
323 */
324
325 #undef __field
326 #define __field(type, item)
327
328 #undef __field_ext
329 #define __field_ext(type, item, filter_type)
330
331 #undef __array
332 #define __array(type, item, len)
333
334 #undef __dynamic_array
335 #define __dynamic_array(type, item, len) u32 item;
336
337 #undef __string
338 #define __string(item, src) __dynamic_array(char, item, -1)
339
340 #undef DECLARE_EVENT_CLASS
341 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
342 struct ftrace_data_offsets_##call { \
343 tstruct; \
344 };
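/*
 * Sketch: a hypothetical event with a single __string(msg, src) field would
 * yield an offsets structure such as
 *
 *	struct ftrace_data_offsets_sample_event {
 *		u32 msg;
 *	};
 *
 * one u32 per dynamic array, holding the offset/length packing computed by
 * ftrace_get_offsets_<call> below.
 */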
345
346 #undef DEFINE_EVENT
347 #define DEFINE_EVENT(template, name, proto, args)
348
349 #undef DEFINE_EVENT_PRINT
350 #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
351 DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
352
353 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
354
355 /*
356 * Stage 3 of the trace events.
357 *
358 * Create the probe function: call the event size calculation and write the
359 * event data into the buffer.
360 */
361
362 #undef __entry
363 #define __entry field
364
365 #undef TP_printk
366 #define TP_printk(fmt, args...) fmt "\n", args
367
368 #undef __get_dynamic_array
369 #define __get_dynamic_array(field) \
370 ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
371
372 #undef __get_str
373 #define __get_str(field) (char *)__get_dynamic_array(field)
374
375 #undef __print_flags
376 #define __print_flags(flag, delim, flag_array...) \
377 ({ \
378 static const struct trace_print_flags __flags[] = \
379 { flag_array, { -1, NULL }}; \
380 ftrace_print_flags_seq(p, delim, flag, __flags); \
381 })
382
383 #undef __print_symbolic
384 #define __print_symbolic(value, symbol_array...) \
385 ({ \
386 static const struct trace_print_flags symbols[] = \
387 { symbol_array, { -1, NULL }}; \
388 ftrace_print_symbols_seq(p, value, symbols); \
389 })
390
391 #undef __print_hex
392 #define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)
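/*
 * Typical usage of the helpers above inside a TP_printk() format string,
 * with made-up field and flag names:
 *
 *	TP_printk("state=%s flags=%s",
 *		__print_symbolic(__entry->state,
 *			{ 0, "RUNNING" }, { 1, "SLEEPING" }),
 *		__print_flags(__entry->flags, "|",
 *			{ 0x1, "READ" }, { 0x2, "WRITE" }))
 *
 * Each helper builds a lookup table terminated by { -1, NULL } and renders
 * the value through the corresponding ftrace_print_*_seq() function.
 */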
393
394 #undef DECLARE_EVENT_CLASS
395 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
396 static notrace enum print_line_t \
397 ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
398 struct trace_event *trace_event) \
399 { \
400 struct ftrace_event_call *event; \
401 struct trace_seq *s = &iter->seq; \
402 struct ftrace_raw_##call *field; \
403 struct trace_entry *entry; \
404 struct trace_seq *p = &iter->tmp_seq; \
405 int ret; \
406 \
407 event = container_of(trace_event, struct ftrace_event_call, \
408 event); \
409 \
410 entry = iter->ent; \
411 \
412 if (entry->type != event->event.type) { \
413 WARN_ON_ONCE(1); \
414 return TRACE_TYPE_UNHANDLED; \
415 } \
416 \
417 field = (typeof(field))entry; \
418 \
419 trace_seq_init(p); \
420 ret = trace_seq_printf(s, "%s: ", event->name); \
421 if (ret) \
422 ret = trace_seq_printf(s, print); \
423 if (!ret) \
424 return TRACE_TYPE_PARTIAL_LINE; \
425 \
426 return TRACE_TYPE_HANDLED; \
427 } \
428 static struct trace_event_functions ftrace_event_type_funcs_##call = { \
429 .trace = ftrace_raw_output_##call, \
430 };
431
432 #undef DEFINE_EVENT_PRINT
433 #define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
434 static notrace enum print_line_t \
435 ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
436 struct trace_event *event) \
437 { \
438 struct trace_seq *s = &iter->seq; \
439 struct ftrace_raw_##template *field; \
440 struct trace_entry *entry; \
441 struct trace_seq *p = &iter->tmp_seq; \
442 int ret; \
443 \
444 entry = iter->ent; \
445 \
446 if (entry->type != event_##call.event.type) { \
447 WARN_ON_ONCE(1); \
448 return TRACE_TYPE_UNHANDLED; \
449 } \
450 \
451 field = (typeof(field))entry; \
452 \
453 trace_seq_init(p); \
454 ret = trace_seq_printf(s, "%s: ", #call); \
455 if (ret) \
456 ret = trace_seq_printf(s, print); \
457 if (!ret) \
458 return TRACE_TYPE_PARTIAL_LINE; \
459 \
460 return TRACE_TYPE_HANDLED; \
461 } \
462 static struct trace_event_functions ftrace_event_type_funcs_##call = { \
463 .trace = ftrace_raw_output_##call, \
464 };
465
466 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
467
468 #undef __field_ext
469 #define __field_ext(type, item, filter_type) \
470 ret = trace_define_field(event_call, #type, #item, \
471 offsetof(typeof(field), item), \
472 sizeof(field.item), \
473 is_signed_type(type), filter_type); \
474 if (ret) \
475 return ret;
476
477 #undef __field
478 #define __field(type, item) __field_ext(type, item, FILTER_OTHER)
479
480 #undef __array
481 #define __array(type, item, len) \
482 BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
483 ret = trace_define_field(event_call, #type "[" #len "]", #item, \
484 offsetof(typeof(field), item), \
485 sizeof(field.item), \
486 is_signed_type(type), FILTER_OTHER); \
487 if (ret) \
488 return ret;
489
490 #undef __dynamic_array
491 #define __dynamic_array(type, item, len) \
492 ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
493 offsetof(typeof(field), __data_loc_##item), \
494 sizeof(field.__data_loc_##item), \
495 is_signed_type(type), FILTER_OTHER);
496
497 #undef __string
498 #define __string(item, src) __dynamic_array(char, item, -1)
499
500 #undef DECLARE_EVENT_CLASS
501 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
502 static int notrace \
503 ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
504 { \
505 struct ftrace_raw_##call field; \
506 int ret; \
507 \
508 tstruct; \
509 \
510 return ret; \
511 }
512
513 #undef DEFINE_EVENT
514 #define DEFINE_EVENT(template, name, proto, args)
515
516 #undef DEFINE_EVENT_PRINT
517 #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
518 DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
519
520 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
521
522 /*
523 * remember the offset of each array from the beginning of the event.
524 */
525
526 #undef __entry
527 #define __entry entry
528
529 #undef __field
530 #define __field(type, item)
531
532 #undef __field_ext
533 #define __field_ext(type, item, filter_type)
534
535 #undef __array
536 #define __array(type, item, len)
537
538 #undef __dynamic_array
539 #define __dynamic_array(type, item, len) \
540 __data_offsets->item = __data_size + \
541 offsetof(typeof(*entry), __data); \
542 __data_offsets->item |= (len * sizeof(type)) << 16; \
543 __data_size += (len) * sizeof(type);
544
545 #undef __string
546 #define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)
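/*
 * The packing above stores, in the low 16 bits, the offset of the data
 * measured from the start of the entry (__data_size so far plus
 * offsetof(__data)), and the byte length in the high 16 bits.  For example,
 * dynamic data starting 24 bytes into the entry with a length of 8 bytes is
 * recorded as (8 << 16) | 24; __get_dynamic_array() later masks with 0xffff
 * to recover the offset.
 */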
547
548 #undef DECLARE_EVENT_CLASS
549 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
550 static inline notrace int ftrace_get_offsets_##call( \
551 struct ftrace_data_offsets_##call *__data_offsets, proto) \
552 { \
553 int __data_size = 0; \
554 struct ftrace_raw_##call __maybe_unused *entry; \
555 \
556 tstruct; \
557 \
558 return __data_size; \
559 }
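/*
 * Sketch of the expansion for a hypothetical event carrying one
 * __string(msg, msg) field, with TP_PROTO(const char *msg):
 *
 *	static inline notrace int ftrace_get_offsets_sample_event(
 *		struct ftrace_data_offsets_sample_event *__data_offsets,
 *		const char *msg)
 *	{
 *		int __data_size = 0;
 *		struct ftrace_raw_sample_event __maybe_unused *entry;
 *
 *		__data_offsets->msg = __data_size +
 *				      offsetof(typeof(*entry), __data);
 *		__data_offsets->msg |= ((strlen(msg) + 1) * sizeof(char)) << 16;
 *		__data_size += (strlen(msg) + 1) * sizeof(char);
 *
 *		return __data_size;
 *	}
 */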
560
561 #undef DEFINE_EVENT
562 #define DEFINE_EVENT(template, name, proto, args)
563
564 #undef DEFINE_EVENT_PRINT
565 #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
566 DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
567
568 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
569
570 /*
571 * Stage 4 of the trace events.
572 *
573 * Override the macros in <trace/trace_events.h> to include the following:
574 *
575 * For those macros defined with TRACE_EVENT:
576 *
577 * static struct ftrace_event_call event_<call>;
578 *
579 * static void ftrace_raw_event_<call>(void *__data, proto)
580 * {
581 * struct ftrace_event_call *event_call = __data;
582 * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
583 * struct ring_buffer_event *event;
584 * struct ftrace_raw_<call> *entry; <-- defined in stage 1
585 * struct ring_buffer *buffer;
586 * unsigned long irq_flags;
587 * int __data_size;
588 * int pc;
589 *
590 * local_save_flags(irq_flags);
591 * pc = preempt_count();
592 *
593 * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
594 *
595 * event = trace_current_buffer_lock_reserve(&buffer,
596 * event_<call>->event.type,
597 * sizeof(*entry) + __data_size,
598 * irq_flags, pc);
599 * if (!event)
600 * return;
601 * entry = ring_buffer_event_data(event);
602 *
603 * { <assign>; } <-- Here we assign the entries by the __field and
604 * __array macros.
605 *
606 * if (!filter_current_check_discard(buffer, event_call, entry, event))
607 * trace_current_buffer_unlock_commit(buffer,
608 * event, irq_flags, pc);
609 * }
610 *
611 * static struct trace_event ftrace_event_type_<call> = {
612 * .trace = ftrace_raw_output_<call>, <-- stage 2
613 * };
614 *
615 * static const char print_fmt_<call>[] = <TP_printk>;
616 *
617 * static struct ftrace_event_class __used event_class_<template> = {
618 * .system = "<system>",
619 * .define_fields = ftrace_define_fields_<call>,
620 * .fields = LIST_HEAD_INIT(event_class_##call.fields),
621 * .raw_init = trace_event_raw_init,
622 * .probe = ftrace_raw_event_##call,
623 * .reg = ftrace_event_reg,
624 * };
625 *
626 * static struct ftrace_event_call __used
627 * __attribute__((__aligned__(4)))
628 * __attribute__((section("_ftrace_events"))) event_<call> = {
629 * .name = "<call>",
630 * .class = event_class_<template>,
631 * .event = &ftrace_event_type_<call>,
632 * .print_fmt = print_fmt_<call>,
633 * };
634 *
635 */
636
637 #ifdef CONFIG_PERF_EVENTS
638
639 #define _TRACE_PERF_PROTO(call, proto) \
640 static notrace void \
641 perf_trace_##call(void *__data, proto);
642
643 #define _TRACE_PERF_INIT(call) \
644 .perf_probe = perf_trace_##call,
645
646 #else
647 #define _TRACE_PERF_PROTO(call, proto)
648 #define _TRACE_PERF_INIT(call)
649 #endif /* CONFIG_PERF_EVENTS */
650
651 #undef __entry
652 #define __entry entry
653
654 #undef __field
655 #define __field(type, item)
656
657 #undef __array
658 #define __array(type, item, len)
659
660 #undef __dynamic_array
661 #define __dynamic_array(type, item, len) \
662 __entry->__data_loc_##item = __data_offsets.item;
663
664 #undef __string
665 #define __string(item, src) __dynamic_array(char, item, -1) \
666
667 #undef __assign_str
668 #define __assign_str(dst, src) \
669 strcpy(__get_str(dst), src);
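/*
 * Within TP_fast_assign(), a field declared with __string(msg, src) is then
 * filled with a plain strcpy() into the space reserved by the dynamic array
 * bookkeeping, e.g. (hypothetical field name):
 *
 *	__assign_str(msg, src);
 */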
670
671 #undef TP_fast_assign
672 #define TP_fast_assign(args...) args
673
674 #undef TP_perf_assign
675 #define TP_perf_assign(args...)
676
677 #undef DECLARE_EVENT_CLASS
678 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
679 \
680 static notrace void \
681 ftrace_raw_event_##call(void *__data, proto) \
682 { \
683 struct ftrace_event_call *event_call = __data; \
684 struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
685 struct ring_buffer_event *event; \
686 struct ftrace_raw_##call *entry; \
687 struct ring_buffer *buffer; \
688 unsigned long irq_flags; \
689 int __data_size; \
690 int pc; \
691 \
692 local_save_flags(irq_flags); \
693 pc = preempt_count(); \
694 \
695 __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
696 \
697 event = trace_current_buffer_lock_reserve(&buffer, \
698 event_call->event.type, \
699 sizeof(*entry) + __data_size, \
700 irq_flags, pc); \
701 if (!event) \
702 return; \
703 entry = ring_buffer_event_data(event); \
704 \
705 tstruct \
706 \
707 { assign; } \
708 \
709 if (!filter_current_check_discard(buffer, event_call, entry, event)) \
710 trace_nowake_buffer_unlock_commit(buffer, \
711 event, irq_flags, pc); \
712 }
713 /*
714 * The ftrace_test_probe is compiled out; it is only here as a build-time check
715 * to make sure that if the tracepoint handling changes, the ftrace probe will
716 * fail to compile unless it too is updated.
717 */
718
719 #undef DEFINE_EVENT
720 #define DEFINE_EVENT(template, call, proto, args) \
721 static inline void ftrace_test_probe_##call(void) \
722 { \
723 check_trace_callback_type_##call(ftrace_raw_event_##template); \
724 }
725
726 #undef DEFINE_EVENT_PRINT
727 #define DEFINE_EVENT_PRINT(template, name, proto, args, print)
728
729 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
730
731 #undef __entry
732 #define __entry REC
733
734 #undef __print_flags
735 #undef __print_symbolic
736 #undef __get_dynamic_array
737 #undef __get_str
738
739 #undef TP_printk
740 #define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
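/*
 * In this stage TP_printk() is only used to build the print_fmt_<call>
 * string, with __entry rewritten to the literal token REC.  For instance
 * (hypothetical format):
 *
 *	TP_printk("pid=%d comm=%s", __entry->pid, __entry->comm)
 *
 * becomes the C string
 *
 *	"\"pid=%d comm=%s\", REC->pid, REC->comm"
 */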
741
742 #undef DECLARE_EVENT_CLASS
743 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
744 _TRACE_PERF_PROTO(call, PARAMS(proto)); \
745 static const char print_fmt_##call[] = print; \
746 static struct ftrace_event_class __used event_class_##call = { \
747 .system = __stringify(TRACE_SYSTEM), \
748 .define_fields = ftrace_define_fields_##call, \
749 .fields = LIST_HEAD_INIT(event_class_##call.fields),\
750 .raw_init = trace_event_raw_init, \
751 .probe = ftrace_raw_event_##call, \
752 .reg = ftrace_event_reg, \
753 _TRACE_PERF_INIT(call) \
754 };
755
756 #undef DEFINE_EVENT
757 #define DEFINE_EVENT(template, call, proto, args) \
758 \
759 static struct ftrace_event_call __used \
760 __attribute__((__aligned__(4))) \
761 __attribute__((section("_ftrace_events"))) event_##call = { \
762 .name = #call, \
763 .class = &event_class_##template, \
764 .event.funcs = &ftrace_event_type_funcs_##template, \
765 .print_fmt = print_fmt_##template, \
766 };
767
768 #undef DEFINE_EVENT_PRINT
769 #define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
770 \
771 static const char print_fmt_##call[] = print; \
772 \
773 static struct ftrace_event_call __used \
774 __attribute__((__aligned__(4))) \
775 __attribute__((section("_ftrace_events"))) event_##call = { \
776 .name = #call, \
777 .class = &event_class_##template, \
778 .event.funcs = &ftrace_event_type_funcs_##call, \
779 .print_fmt = print_fmt_##call, \
780 }
781
782 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
783
784 /*
785 * Define the insertion callback for perf events.
786 *
787 * The job is very similar to ftrace_raw_event_<call> except that we don't
788 * write into the ring buffer but into a perf counter.
789 *
790 * static void ftrace_perf_<call>(proto)
791 * {
792 * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
793 * struct ftrace_event_call *event_call = &event_<call>;
794 * extern void perf_tp_event(int, u64, u64, void *, int);
795 * struct ftrace_raw_##call *entry;
796 * struct perf_trace_buf *trace_buf;
797 * u64 __addr = 0, __count = 1;
798 * unsigned long irq_flags;
799 * struct trace_entry *ent;
800 * int __entry_size;
801 * int __data_size;
802 * int __cpu;
803 * int pc;
804 *
805 * pc = preempt_count();
806 *
807 * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
808 *
809 * // Below we want to get the aligned size by taking into account
810 * // the u32 field that will later store the buffer size
811 * __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
812 * sizeof(u64));
813 * __entry_size -= sizeof(u32);
814 *
815 * // Protect the non nmi buffer
816 * // This also protects the rcu read side
817 * local_irq_save(irq_flags);
818 * __cpu = smp_processor_id();
819 *
820 * if (in_nmi())
821 * trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
822 * else
823 * trace_buf = rcu_dereference_sched(perf_trace_buf);
824 *
825 * if (!trace_buf)
826 * goto end;
827 *
828 * trace_buf = per_cpu_ptr(trace_buf, __cpu);
829 *
830 * // Avoid recursion from perf that could mess up the buffer
831 * if (trace_buf->recursion++)
832 * goto end_recursion;
833 *
834 * raw_data = trace_buf->buf;
835 *
836 * // Make recursion update visible before entering perf_tp_event
837 * // so that we protect from perf recursions.
838 *
839 * barrier();
840 *
841 * //zero dead bytes from alignment to avoid stack leak to userspace:
842 * *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
843 * entry = (struct ftrace_raw_<call> *)raw_data;
844 * ent = &entry->ent;
845 * tracing_generic_entry_update(ent, irq_flags, pc);
846 * ent->type = event_call->id;
847 *
848 * <tstruct> <- set up the dynamic array offsets
849 *
850 * <assign> <- assign our values
851 *
852 * perf_tp_event(event_call->id, __addr, __count, entry,
853 * __entry_size); <- submit them to perf counter
854 *
855 * }
856 */
857
858 #ifdef CONFIG_PERF_EVENTS
859
860 #undef __entry
861 #define __entry entry
862
863 #undef __get_dynamic_array
864 #define __get_dynamic_array(field) \
865 ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
866
867 #undef __get_str
868 #define __get_str(field) (char *)__get_dynamic_array(field)
869
870 #undef __perf_addr
871 #define __perf_addr(a) __addr = (a)
872
873 #undef __perf_count
874 #define __perf_count(c) __count = (c)
875
876 #undef DECLARE_EVENT_CLASS
877 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
878 static notrace void \
879 perf_trace_##call(void *__data, proto) \
880 { \
881 struct ftrace_event_call *event_call = __data; \
882 struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
883 struct ftrace_raw_##call *entry; \
884 struct pt_regs __regs; \
885 u64 __addr = 0, __count = 1; \
886 struct hlist_head *head; \
887 int __entry_size; \
888 int __data_size; \
889 int rctx; \
890 \
891 perf_fetch_caller_regs(&__regs); \
892 \
893 __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
894 __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
895 sizeof(u64)); \
896 __entry_size -= sizeof(u32); \
897 \
898 if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE, \
899 "profile buffer not large enough")) \
900 return; \
901 \
902 entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \
903 __entry_size, event_call->event.type, &__regs, &rctx); \
904 if (!entry) \
905 return; \
906 \
907 tstruct \
908 \
909 { assign; } \
910 \
911 head = this_cpu_ptr(event_call->perf_events); \
912 perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
913 __count, &__regs, head); \
914 }
915
916 /*
917 * This part is compiled out, it is only here as a build time check
918 * to make sure that if the tracepoint handling changes, the
919 * perf probe will fail to compile unless it too is updated.
920 */
921 #undef DEFINE_EVENT
922 #define DEFINE_EVENT(template, call, proto, args) \
923 static inline void perf_test_probe_##call(void) \
924 { \
925 check_trace_callback_type_##call(perf_trace_##template); \
926 }
927
928
929 #undef DEFINE_EVENT_PRINT
930 #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
931 DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
932
933 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
934 #endif /* CONFIG_PERF_EVENTS */
935
936 #undef _TRACE_PROFILE_INIT
937 #endif /* 0 */