#include <lttng-types.h>
#include <linux/debugfs.h>

/*
 * Macros mapping tp_assign() to "=", tp_memcpy() to memcpy() and tp_strcpy()
 * to strcpy().
 */

#define tp_assign(dest, src)						\
	lib_ring_buffer_align_ctx(config, &ctx, sizeof(src));		\
	lib_ring_buffer_write(config, &ctx, &src, sizeof(src));

#define tp_memcpy(dest, src, len)					\
	lib_ring_buffer_align_ctx(config, &ctx, sizeof(*(src)));	\
	lib_ring_buffer_write(config, &ctx, src, len);

#define tp_strcpy(dest, src)	__assign_str(dest, src);

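/*
 * Illustrative sketch (not from this file; event and field names are
 * hypothetical): inside a TP_fast_assign() block, an event with an
 * integer and a string field could use these macros as follows:
 *
 *	TP_fast_assign(
 *		tp_assign(pid, p->pid);
 *		tp_strcpy(comm, p->comm);
 *	)
 *
 * tp_assign() serializes the value straight into the ring buffer after
 * aligning the write context, so no intermediate trace entry is filled.
 */
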
struct lttng_event_field {
	const char		*name;
	const struct lttng_type	type;
};

struct lttng_event_desc {
	const struct lttng_event_field	*fields;
	unsigned int			nr_fields;
};

/*
 * Stage 1 of the trace events.
 *
 * Create event field type metadata section.
 * Each event produces an array of fields.
 *
 * DECLARE_EVENT_CLASS can be used to add generic function handlers for
 * events. That is, if all events have the same parameters and just have
 * distinct trace points. Each tracepoint can be defined with DEFINE_EVENT
 * and that will map the DECLARE_EVENT_CLASS to the tracepoint.
 *
 * TRACE_EVENT is a one-to-one mapping between tracepoint and template.
 */

#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)		  \
	DECLARE_EVENT_CLASS(name, PARAMS(proto), PARAMS(args),		  \
			    PARAMS(tstruct), PARAMS(assign), PARAMS(print)); \
	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args))

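/*
 * For example (sketch; the names below come from the kernel's
 * include/trace/events/sched.h, not from this file), one class can back
 * several events:
 *
 *	DECLARE_EVENT_CLASS(sched_wakeup_template, ...);
 *	DEFINE_EVENT(sched_wakeup_template, sched_wakeup, ...);
 *	DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new, ...);
 *
 * whereas TRACE_EVENT(name, ...) simply expands to a class plus a single
 * DEFINE_EVENT of the same name, as shown above.
 */
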
/* Named field types must be defined in lttng-types.h */

#undef __field
#define __field(_type, _item)						\
	{ .name = #_item, .type = { .atype = atype_integer, .name = #_type} },

#undef __field_ext
#define __field_ext(_type, _item, _filter_type)			\
	{ .name = #_item, .type = { .atype = atype_integer, .name = #_type} },

#undef __array
#define __array(_type, _item, _length)					\
	{ .name = #_item,						\
	  .type = { .atype = atype_array,				\
		    .u.array.elem_type = #_type,			\
		    .u.array.length = _length, }, },

#undef __dynamic_array
#define __dynamic_array(_type, _item, _length)				\
	{ .name = #_item,						\
	  .type = { .atype = atype_sequence,				\
		    .u.sequence.elem_type = #_type,			\
		    .u.sequence.length_type = "u32", }, },

#undef __string
#define __string(_item, _src)						\
	{ .name = #_item,						\
	  .type = { .atype = atype_string,				\
		    .u.string.encoding = lttng_encode_UTF8, }, },

#undef TP_PROTO
#define TP_PROTO(args...)

#undef TP_ARGS
#define TP_ARGS(args...)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args	/* Only one used in this phase */

#undef TP_fast_assign
#define TP_fast_assign(args...)

#undef TP_printk
#define TP_printk(args...)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)	     \
	static const struct lttng_event_field __event_fields___##name[] = { \
		tstruct							     \
	};

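/*
 * Sketch (hypothetical event name and fields): a class declared with
 *
 *	TP_STRUCT__entry(__field(int, pid) __string(comm, comm))
 *
 * expands at this stage to something like:
 *
 *	static const struct lttng_event_field __event_fields___sample[] = {
 *		{ .name = "pid", .type = { .atype = atype_integer,
 *					   .name = "int" } },
 *		{ .name = "comm", .type = { .atype = atype_string,
 *				  .u.string.encoding = lttng_encode_UTF8, } },
 *	};
 */
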
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

/* Callbacks are meaningless to LTTng. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,		\
		assign, print, reg, unreg)			\
	TRACE_EVENT(name, PARAMS(proto), PARAMS(args),		\
		PARAMS(tstruct), PARAMS(assign), PARAMS(print))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 2 of the trace events.
 *
 * Create an array of events.
 *
 * DECLARE_EVENT_CLASS can be used to add generic function handlers for
 * events. That is, if all events have the same parameters and just have
 * distinct trace points. Each tracepoint can be defined with DEFINE_EVENT
 * and that will map the DECLARE_EVENT_CLASS to the tracepoint.
 *
 * TRACE_EVENT is a one-to-one mapping between tracepoint and template.
 */

#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)		  \
	DECLARE_EVENT_CLASS(name, PARAMS(proto), PARAMS(args),		  \
			    PARAMS(tstruct), PARAMS(assign), PARAMS(print)); \
	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args))

/* Named field types must be defined in lttng-types.h */

#undef __field
#define __field(_type, _item)

#undef __field_ext
#define __field_ext(_type, _item, _filter_type)

#undef __array
#define __array(_type, _item, _length)

#undef __dynamic_array
#define __dynamic_array(_type, _item, _length)

#undef __string
#define __string(_item, _src)

#undef TP_PROTO
#define TP_PROTO(args...)

#undef TP_ARGS
#define TP_ARGS(args...)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...)

#undef TP_fast_assign
#define TP_fast_assign(args...)

#undef TP_printk
#define TP_printk(args...)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(_name, proto, args, tstruct, assign, print) \
	{								 \
		.fields = __event_fields___##_name,			 \
		.nr_fields = ARRAY_SIZE(__event_fields___##_name),	 \
	},

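/*
 * Continuing the hypothetical "sample" event from stage 1, this stage
 * emits one array entry per class:
 *
 *	{
 *		.fields = __event_fields___sample,
 *		.nr_fields = ARRAY_SIZE(__event_fields___sample),
 *	},
 */
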
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

/* Callbacks are meaningless to LTTng. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,		\
		assign, print, reg, unreg)			\
	TRACE_EVENT(name, PARAMS(proto), PARAMS(args),		\
		PARAMS(tstruct), PARAMS(assign), PARAMS(print))

#define TP_ID1(_token, _system)	_token##_system
#define TP_ID(_token, _system)	TP_ID1(_token, _system)

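/*
 * The two-level helper forces TRACE_SYSTEM to be macro-expanded before
 * token pasting. E.g., with TRACE_SYSTEM defined as sched,
 * TP_ID(__event_desc___, TRACE_SYSTEM) yields __event_desc___sched,
 * whereas a direct _token##_system paste would produce the literal
 * __event_desc___TRACE_SYSTEM.
 */
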
static const struct lttng_event_desc TP_ID(__event_desc___, TRACE_SYSTEM)[] = {
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
};

/*
 * Stage 3 of the trace events.
 *
 * Create seq file metadata output.
 *
 * DECLARE_EVENT_CLASS can be used to add generic function handlers for
 * events. That is, if all events have the same parameters and just have
 * distinct trace points. Each tracepoint can be defined with DEFINE_EVENT
 * and that will map the DECLARE_EVENT_CLASS to the tracepoint.
 *
 * TRACE_EVENT is a one-to-one mapping between tracepoint and template.
 */

#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)		  \
	DECLARE_EVENT_CLASS(name, PARAMS(proto), PARAMS(args),		  \
			    PARAMS(tstruct), PARAMS(assign), PARAMS(print)); \
	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args))

/* Named field types must be defined in lttng-types.h */

#undef __field
#define __field(_type, _item)

#undef __field_ext
#define __field_ext(_type, _item, _filter_type)

#undef __array
#define __array(_type, _item, _length)

#undef __dynamic_array
#define __dynamic_array(_type, _item, _length)

#undef __string
#define __string(_item, _src)

#undef TP_PROTO
#define TP_PROTO(args...)

#undef TP_ARGS
#define TP_ARGS(args...)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...)

#undef TP_fast_assign
#define TP_fast_assign(args...)

#undef TP_printk
#define TP_printk(args...)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

/* Callbacks are meaningless to LTTng. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,		\
		assign, print, reg, unreg)			\
	TRACE_EVENT(name, PARAMS(proto), PARAMS(args),		\
		PARAMS(tstruct), PARAMS(assign), PARAMS(print))

#define TP_ID1(_token, _system)	_token##_system
#define TP_ID(_token, _system)	TP_ID1(_token, _system)
#define module_init_eval1(_token, _system)	module_init(_token##_system)
#define module_init_eval(_token, _system)	module_init_eval1(_token, _system)
#define module_exit_eval1(_token, _system)	module_exit(_token##_system)
#define module_exit_eval(_token, _system)	module_exit_eval1(_token, _system)

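/*
 * Same double-expansion trick as TP_ID(): with TRACE_SYSTEM defined as
 * sched, module_init_eval(__lttng_types_init__, TRACE_SYSTEM) expands to
 * module_init(__lttng_types_init__sched).
 */
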
static void *TP_ID(__lttng_seq_start__, TRACE_SYSTEM)(struct seq_file *m,
		loff_t *pos)
{
	const struct lttng_event_desc *desc =
		&TP_ID(__event_desc___, TRACE_SYSTEM)[*pos];

	if (desc > &TP_ID(__event_desc___, TRACE_SYSTEM)
			[ARRAY_SIZE(TP_ID(__event_desc___, TRACE_SYSTEM)) - 1])
		return NULL;
	return (void *) desc;
}

static void *TP_ID(__lttng_seq_next__, TRACE_SYSTEM)(struct seq_file *m,
		void *p, loff_t *ppos)
{
	const struct lttng_event_desc *desc =
		&TP_ID(__event_desc___, TRACE_SYSTEM)[++(*ppos)];

	if (desc > &TP_ID(__event_desc___, TRACE_SYSTEM)
			[ARRAY_SIZE(TP_ID(__event_desc___, TRACE_SYSTEM)) - 1])
		return NULL;
	return (void *) desc;
}

static void TP_ID(__lttng_seq_stop__, TRACE_SYSTEM)(struct seq_file *m,
		void *p)
{
}

static int TP_ID(__lttng_seq_show__, TRACE_SYSTEM)(struct seq_file *m,
		void *p)
{
	const struct lttng_event_desc *desc = p;
	int i;

	seq_printf(m,	"event {\n"
			"\tstream = UNKNOWN;\n");
	for (i = 0; i < desc->nr_fields; i++) {
		if (desc->fields[i].type.name)	/* Named type */
			seq_printf(m, "\t\t%s",
				   desc->fields[i].type.name);
		else				/* Nameless type */
			lttng_print_event_type(m, 2, &desc->fields[i].type);
		seq_printf(m, " %s;\n", desc->fields[i].name);
	}
	seq_printf(m, "\t};\n");
	seq_printf(m, "};\n");
	return 0;
}

static struct seq_operations TP_ID(__lttng_types_seq_ops__, TRACE_SYSTEM) = {
	.start = TP_ID(__lttng_seq_start__, TRACE_SYSTEM),
	.next = TP_ID(__lttng_seq_next__, TRACE_SYSTEM),
	.stop = TP_ID(__lttng_seq_stop__, TRACE_SYSTEM),
	.show = TP_ID(__lttng_seq_show__, TRACE_SYSTEM),
};

static int
TP_ID(__lttng_types_open__, TRACE_SYSTEM)(struct inode *inode, struct file *file)
{
	return seq_open(file, &TP_ID(__lttng_types_seq_ops__, TRACE_SYSTEM));
}

static const struct file_operations TP_ID(__lttng_types_fops__, TRACE_SYSTEM) = {
	.open = TP_ID(__lttng_types_open__, TRACE_SYSTEM),
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

static struct dentry *TP_ID(__lttng_types_dentry__, TRACE_SYSTEM);

static int TP_ID(__lttng_types_init__, TRACE_SYSTEM)(void)
{
	int ret = 0;

	TP_ID(__lttng_types_dentry__, TRACE_SYSTEM) =
		debugfs_create_file("lttng-events-" __stringify(TRACE_SYSTEM),
			S_IWUSR, NULL, NULL,
			&TP_ID(__lttng_types_fops__, TRACE_SYSTEM));
	if (IS_ERR(TP_ID(__lttng_types_dentry__, TRACE_SYSTEM))
			|| !TP_ID(__lttng_types_dentry__, TRACE_SYSTEM)) {
		printk(KERN_ERR "Error creating LTTng type export file\n");
		ret = -ENOMEM;
	}
	return ret;
}

module_init_eval(__lttng_types_init__, TRACE_SYSTEM);

static void TP_ID(__lttng_types_exit__, TRACE_SYSTEM)(void)
{
	debugfs_remove(TP_ID(__lttng_types_dentry__, TRACE_SYSTEM));
}

module_exit_eval(__lttng_types_exit__, TRACE_SYSTEM);

#undef module_init_eval
#undef module_exit_eval

/*
 * Stage 3 of the trace events.
 *
 * Create static inline function that calculates event size.
 */

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 4 of the trace events.
 *
 * Create the probe function: call event size calculation and write event data.
 */

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#include <linux/ftrace_event.h>

/*
 * DECLARE_EVENT_CLASS can be used to add generic function handlers for
 * events. That is, if all events have the same parameters and just have
 * distinct trace points. Each tracepoint can be defined with DEFINE_EVENT
 * and that will map the DECLARE_EVENT_CLASS to the tracepoint.
 *
 * TRACE_EVENT is a one-to-one mapping between tracepoint and template.
 */

#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)		  \
	DECLARE_EVENT_CLASS(name, PARAMS(proto), PARAMS(args),		  \
			    PARAMS(tstruct), PARAMS(assign), PARAMS(print)); \
	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));

#undef __field
#define __field(type, item)		type	item;

#undef __field_ext
#define __field_ext(type, item, filter_type)	type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {					\
		struct trace_entry	ent;				\
		tstruct							\
		char			__data[0];			\
	};								\
									\
	static struct ftrace_event_class event_class_##name;

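/*
 * Sketch of the expansion for a hypothetical event with one static and
 * one dynamic field:
 *
 *	struct ftrace_raw_sample {
 *		struct trace_entry	ent;
 *		int			pid;
 *		u32			__data_loc_comm;
 *		char			__data[0];
 *	};
 *
 * Dynamic payloads live in __data[]; only their u32 location descriptor
 * sits in the fixed-size part of the record.
 */
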
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)	\
	static struct ftrace_event_call	__used		\
	__attribute__((__aligned__(4))) event_##name

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,		\
		assign, print, reg, unreg)			\
	TRACE_EVENT(name, PARAMS(proto), PARAMS(args),		\
		PARAMS(tstruct), PARAMS(assign), PARAMS(print))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 2 of the trace events.
 *
 * Create static inline function that calculates event size.
 */

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
	struct ftrace_data_offsets_##call {				\
		tstruct;						\
	};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 3 of the trace events.
 *
 * Create the probe function: call event size calculation and write event data.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

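/*
 * The u32 __data_loc_<field> descriptor packs the location of the
 * dynamic payload: bits 0-15 hold its offset from the start of the
 * entry (hence the & 0xffff above) and bits 16-31 hold its length,
 * as encoded by __dynamic_array() in the get_offsets stage below.
 */
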
#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags __flags[] =	\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, __flags);	\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})

#undef __print_hex
#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)

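/*
 * Typical TP_printk() usage (sketch; field name and table values are
 * made up):
 *
 *	TP_printk("state %s", __print_symbolic(__entry->state,
 *			{ 0, "RUNNING" },
 *			{ 1, "SLEEPING" }))
 *
 * The lookup table is built once (static) and rendered through the
 * iterator's tmp_seq (p) at output time.
 */
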
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace enum print_line_t					\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
			 struct trace_event *trace_event)		\
{									\
	struct ftrace_event_call *event;				\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##call *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p = &iter->tmp_seq;				\
	int ret;							\
									\
	event = container_of(trace_event, struct ftrace_event_call,	\
			     event);					\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event->event.type) {				\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, "%s: ", event->name);			\
	if (ret)							\
		ret = trace_seq_printf(s, print);			\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}									\
static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
	.trace			= ftrace_raw_output_##call,		\
};

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
static notrace enum print_line_t					\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
			 struct trace_event *event)			\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##template *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p = &iter->tmp_seq;				\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.event.type) {			\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, "%s: ", #call);			\
	if (ret)							\
		ret = trace_seq_printf(s, print);			\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}									\
static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
	.trace			= ftrace_raw_output_##call,		\
};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __field_ext
#define __field_ext(type, item, filter_type)				\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), filter_type);	\
	if (ret)							\
		return ret;

#undef __field
#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len)					\
	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);				\
	ret = trace_define_field(event_call, #type "[" #len "]", #item,	\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
				 offsetof(typeof(field), __data_loc_##item),   \
				 sizeof(field.__data_loc_##item),	       \
				 is_signed_type(type), FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
static notrace int							\
ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
{									\
	struct ftrace_raw_##call field;					\
	int ret;							\
									\
	tstruct;							\
									\
	return ret;							\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= (len * sizeof(type)) << 16;		\
	__data_size += (len) * sizeof(type);

#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)

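/*
 * Worked example (hypothetical): for __string(comm, comm) with
 * strlen(comm) == 15, len is 16. The low 16 bits of __data_offsets->comm
 * record where the string will start relative to the entry (the current
 * __data_size plus the offset of __data[]), the high 16 bits record the
 * length 16 * sizeof(char) == 16, and __data_size grows by 16 bytes for
 * the ring buffer reservation.
 */
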
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static inline notrace int ftrace_get_offsets_##call(			\
	struct ftrace_data_offsets_##call *__data_offsets, proto)	\
{									\
	int __data_size = 0;						\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(void *__data, proto)
 * {
 *	struct ftrace_event_call *event_call = __data;
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
 *	int __data_size;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	event = trace_current_buffer_lock_reserve(&buffer,
 *				  event_<call>->event.type,
 *				  sizeof(*entry) + __data_size,
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *
 *	{ <assign>; }  <-- Here we assign the entries by the __field and
 *			   __array macros.
 *
 *	if (!filter_current_check_discard(buffer, event_call, entry, event))
 *		trace_current_buffer_unlock_commit(buffer,
 *						   event, irq_flags, pc);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 2
 * };
 *
 * static const char print_fmt_<call>[] = <TP_printk>;
 *
 * static struct ftrace_event_class __used event_class_<template> = {
 *	.system			= "<system>",
 *	.define_fields		= ftrace_define_fields_<call>,
 *	.fields			= LIST_HEAD_INIT(event_class_##call.fields),
 *	.raw_init		= trace_event_raw_init,
 *	.probe			= ftrace_raw_event_##call,
 *	.reg			= ftrace_event_reg,
 * };
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.class			= event_class_<template>,
 *	.event			= &ftrace_event_type_<call>,
 *	.print_fmt		= print_fmt_<call>,
 * };
 */

#ifdef CONFIG_PERF_EVENTS

#define _TRACE_PERF_PROTO(call, proto)				\
	static notrace void					\
	perf_trace_##call(void *__data, proto);

#define _TRACE_PERF_INIT(call)					\
	.perf_probe		= perf_trace_##call,

#else
#define _TRACE_PERF_PROTO(call, proto)
#define _TRACE_PERF_INIT(call)
#endif /* CONFIG_PERF_EVENTS */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)			\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src)					\
	strcpy(__get_str(dst), src);

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
									\
static notrace void							\
ftrace_raw_event_##call(void *__data, proto)				\
{									\
	struct ftrace_event_call *event_call = __data;			\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	struct ring_buffer *buffer;					\
	unsigned long irq_flags;					\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	event = trace_current_buffer_lock_reserve(&buffer,		\
				 event_call->event.type,		\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry = ring_buffer_event_data(event);				\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
		trace_nowake_buffer_unlock_commit(buffer,		\
						  event, irq_flags, pc); \
}

/*
 * The ftrace_test_probe is compiled out; it is only here as a build-time
 * check to make sure that if the tracepoint handling changes, the ftrace
 * probe will fail to compile unless it too is updated.
 */

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void ftrace_test_probe_##call(void)			\
{									\
	check_trace_callback_type_##call(ftrace_raw_event_##template);	\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __print_symbolic
#undef __get_dynamic_array

#undef TP_printk
#define TP_printk(fmt, args...) "\"" fmt "\", "  __stringify(args)

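/*
 * E.g., TP_printk("pid %d", __entry->pid) now evaluates to the string
 * literal "\"pid %d\", __entry->pid", which is stored as print_fmt_<call>
 * and exported to user space through the event "format" file.
 */
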
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
_TRACE_PERF_PROTO(call, PARAMS(proto));					\
static const char print_fmt_##call[] = print;				\
static struct ftrace_event_class __used event_class_##call = {		\
	.system			= __stringify(TRACE_SYSTEM),		\
	.define_fields		= ftrace_define_fields_##call,		\
	.fields			= LIST_HEAD_INIT(event_class_##call.fields),\
	.raw_init		= trace_event_raw_init,			\
	.probe			= ftrace_raw_event_##call,		\
	.reg			= ftrace_event_reg,			\
	_TRACE_PERF_INIT(call)						\
};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.class			= &event_class_##template,		\
	.event.funcs		= &ftrace_event_type_funcs_##template,	\
	.print_fmt		= print_fmt_##template,			\
};

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
									\
static const char print_fmt_##call[] = print;				\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.class			= &event_class_##template,		\
	.event.funcs		= &ftrace_event_type_funcs_##call,	\
	.print_fmt		= print_fmt_##call,			\
};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Define the insertion callback to perf events
 *
 * The job is very similar to ftrace_raw_event_<call> except that we don't
 * insert in the ring buffer but in a perf counter.
 *
 * static void ftrace_perf_<call>(proto)
 * {
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ftrace_event_call *event_call = &event_<call>;
 *	extern void perf_tp_event(int, u64, u64, void *, int);
 *	struct ftrace_raw_##call *entry;
 *	struct perf_trace_buf *trace_buf;
 *	u64 __addr = 0, __count = 1;
 *	unsigned long irq_flags;
 *	struct trace_entry *ent;
 *	int __entry_size;
 *	int __data_size;
 *	int __cpu;
 *	int pc;
 *
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	// Below we want to get the aligned size by taking into account
 *	// the u32 field that will later store the buffer size
 *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
 *			     sizeof(u64));
 *	__entry_size -= sizeof(u32);
 *
 *	// Protect the non nmi buffer
 *	// This also protects the rcu read side
 *	local_irq_save(irq_flags);
 *	__cpu = smp_processor_id();
 *
 *	if (in_nmi())
 *		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
 *	else
 *		trace_buf = rcu_dereference_sched(perf_trace_buf);
 *
 *	if (!trace_buf)
 *		goto end;
 *
 *	trace_buf = per_cpu_ptr(trace_buf, __cpu);
 *
 *	// Avoid recursion from perf that could mess up the buffer
 *	if (trace_buf->recursion++)
 *		goto end_recursion;
 *
 *	raw_data = trace_buf->buf;
 *
 *	// Make recursion update visible before entering perf_tp_event
 *	// so that we protect from perf recursions.
 *
 *	barrier();
 *
 *	//zero dead bytes from alignment to avoid stack leak to userspace:
 *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 *	entry = (struct ftrace_raw_<call> *)raw_data;
 *	ent = &entry->ent;
 *	tracing_generic_entry_update(ent, irq_flags, pc);
 *	ent->type = event_call->id;
 *
 *	<tstruct> <- do some work with the dynamic arrays
 *
 *	<assign>  <- assign our values
 *
 *	perf_tp_event(event_call->id, __addr, __count, entry,
 *		      __entry_size);  <- submit them to the perf counter
 * }
 */

#ifdef CONFIG_PERF_EVENTS

#undef __entry
#define __entry entry

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)

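/*
 * These only matter inside TP_perf_assign(); e.g. (sketch modeled on the
 * kernel's sched_stat events), __perf_count(delay) makes perf account
 * the sample with a weight of "delay" instead of the default count of 1.
 */
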
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace void							\
perf_trace_##call(void *__data, proto)					\
{									\
	struct ftrace_event_call *event_call = __data;			\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_raw_##call *entry;				\
	struct pt_regs __regs;						\
	u64 __addr = 0, __count = 1;					\
	struct hlist_head *head;					\
	int __entry_size;						\
	int __data_size;						\
	int rctx;							\
									\
	perf_fetch_caller_regs(&__regs);				\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE,		\
		      "profile buffer not large enough"))		\
		return;							\
									\
	entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare(	\
		__entry_size, event_call->event.type, &__regs, &rctx);	\
	if (!entry)							\
		return;							\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	head = this_cpu_ptr(event_call->perf_events);			\
	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,	\
		__count, &__regs, head);				\
}

/*
 * This part is compiled out; it is only here as a build-time check
 * to make sure that if the tracepoint handling changes, the
 * perf probe will fail to compile unless it too is updated.
 */

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void perf_test_probe_##call(void)			\
{									\
	check_trace_callback_type_##call(perf_trace_##template);	\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_PERF_EVENTS */

#undef _TRACE_PROFILE_INIT