/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry	ent;
 *	<type>			<item>;
 *	<type2>			<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */

#include <linux/ftrace_event.h>

/*
 * DECLARE_EVENT_CLASS can be used to add generic function
 * handlers for events. That is, if all events have the same
 * parameters and just have distinct trace points.
 * Each tracepoint can be defined with DEFINE_EVENT and that
 * will map the DECLARE_EVENT_CLASS to the tracepoint.
 *
 * TRACE_EVENT is a one to one mapping between tracepoint and template.
 */
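
/*
 * For illustration only (hypothetical tracepoint, not part of this file):
 *
 *	TRACE_EVENT(sample_ev, ...)
 *
 * is shorthand for
 *
 *	DECLARE_EVENT_CLASS(sample_ev, ...);
 *	DEFINE_EVENT(sample_ev, sample_ev, ...);
 *
 * whereas several tracepoints sharing one prototype would use a single
 * DECLARE_EVENT_CLASS and one DEFINE_EVENT per tracepoint.
 */
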
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
	DECLARE_EVENT_CLASS(name,				\
			    PARAMS(proto),			\
			    PARAMS(args),			\
			    PARAMS(tstruct),			\
			    PARAMS(assign),			\
			    PARAMS(print));			\
	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));

#undef __field
#define __field(type, item)		type	item;

#undef __field_ext
#define __field_ext(type, item, filter_type)	type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {					\
		struct trace_entry	ent;				\
		tstruct							\
		char			__data[0];			\
	};
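
/*
 * Sketch of the stage 1 expansion for a hypothetical event whose
 * TP_STRUCT__entry uses __field(int, foo) and __string(bar, src):
 *
 *	struct ftrace_raw_sample_ev {
 *		struct trace_entry	ent;
 *		int			foo;
 *		u32			__data_loc_bar;
 *		char			__data[0];
 *	};
 */
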
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)	\
	static struct ftrace_event_call event_##name

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef __cpparg
#define __cpparg(arg...) arg

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,		\
		assign, print, reg, unreg)			\
	TRACE_EVENT(name, __cpparg(proto), __cpparg(args),	\
		__cpparg(tstruct), __cpparg(assign), __cpparg(print))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	u32	<item1>;
 *	u32	<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro will create each u32 <item>; it records
 * the offset of each array from the beginning of the event.
 * The size of each array is also encoded, in the upper 16 bits of <item>.
 */
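
/*
 * Worked example with hypothetical numbers: a dynamic array that starts
 * 40 bytes from the beginning of the event and occupies 12 bytes is
 * recorded as
 *
 *	(12 << 16) | 40 == 0x000c0028
 *
 * i.e. size in the upper 16 bits, offset in the lower 16 bits.
 */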

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
	struct ftrace_data_offsets_##call {				\
		tstruct;						\
	};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Set up the format used to show the trace point.
 *
 * int
 * ftrace_format_##call(struct trace_seq *s)
 * {
 *	struct ftrace_raw_##call field;
 *	int ret;
 *
 *	ret = trace_seq_printf(s, #type " " #item ";"
 *			       " offset:%u; size:%u;\n",
 *			       offsetof(struct ftrace_raw_##call, item),
 *			       sizeof(field.item));
 *
 * }
 */
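
/*
 * For reference, each line emitted into the events/<system>/<call>/format
 * file under the tracing debugfs directory looks roughly like this (the
 * field name, offset and size below are illustrative only):
 *
 *	field:int foo;	offset:12;	size:4;	signed:1;
 */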

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef __field
#define __field(type, item)					\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"	\
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item),	\
			       (unsigned int)is_signed_type(type));	\
	if (!ret)							\
		return 0;

#undef __field_ext
#define __field_ext(type, item, filter_type)	__field(type, item)

#undef __array
#define __array(type, item, len)					\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item),	\
			       (unsigned int)is_signed_type(type));	\
	if (!ret)							\
		return 0;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t" \
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	\
			       (unsigned int)offsetof(typeof(field),	\
					__data_loc_##item),		\
			       (unsigned int)sizeof(field.__data_loc_##item), \
			       (unsigned int)is_signed_type(type));	\
	if (!ret)							\
		return 0;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __entry
#define __entry REC

#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...) "\"%s\", %s\n", fmt, __stringify(args)

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
static int								\
ftrace_format_setup_##call(struct ftrace_event_call *unused,		\
			   struct trace_seq *s)				\
{									\
	struct ftrace_raw_##call field __attribute__((unused));	\
	int ret = 0;							\
									\
	tstruct;							\
									\
	return ret;							\
}									\
									\
static int								\
ftrace_format_##call(struct ftrace_event_call *unused,			\
		     struct trace_seq *s)				\
{									\
	int ret = 0;							\
									\
	ret = ftrace_format_setup_##call(unused, s);			\
	if (!ret)							\
		return ret;						\
									\
	ret = trace_seq_printf(s, "\nprint fmt: " print);		\
									\
	return ret;							\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)		\
static int								\
ftrace_format_##name(struct ftrace_event_call *unused,			\
		     struct trace_seq *s)				\
{									\
	int ret = 0;							\
									\
	ret = ftrace_format_setup_##template(unused, s);		\
	if (!ret)							\
		return ret;						\
									\
	ret = trace_seq_printf(s, "\nprint fmt: " print);		\
									\
	return ret;							\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field;	<-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>.id) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	p = &get_cpu_var(ftrace_event_seq);
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, <TP_printk> "\n");
 *	put_cpu();
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)
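
/*
 * Illustrative use (hypothetical field): a TP_printk() such as
 *
 *	TP_printk("name=%s", __get_str(bar))
 *
 * resolves the string through the offset stored in __data_loc_bar,
 * using the __get_dynamic_array() accessor defined above.
 */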

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags __flags[] =	\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, __flags);	\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})
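
/*
 * Illustrative use (hypothetical values): in TP_printk() one might write
 *
 *	__print_symbolic(__entry->state, { 0, "IDLE" }, { 1, "BUSY" })
 *
 * which prints the matching name instead of the raw number, via the
 * trace_print_flags table built above.
 */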

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static enum print_line_t						\
ftrace_raw_output_id_##call(int event_id, const char *name,		\
			    struct trace_iterator *iter, int flags)	\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##call *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p;						\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_id) {					\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	p = &get_cpu_var(ftrace_event_seq);				\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, "%s: ", name);			\
	if (ret)							\
		ret = trace_seq_printf(s, print);			\
	put_cpu();							\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)			\
static enum print_line_t						\
ftrace_raw_output_##name(struct trace_iterator *iter, int flags)	\
{									\
	return ftrace_raw_output_id_##template(event_##name.id,	\
					       #name, iter, flags);	\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
static enum print_line_t						\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags)	\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##template *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p;						\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.id) {				\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	p = &get_cpu_var(ftrace_event_seq);				\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, "%s: ", #call);			\
	if (ret)							\
		ret = trace_seq_printf(s, print);			\
	put_cpu();							\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __field_ext
#define __field_ext(type, item, filter_type)				\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), filter_type);	\
	if (ret)							\
		return ret;

#undef __field
#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len)					\
	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);				\
	ret = trace_define_field(event_call, #type "[" #len "]", #item,	\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item), 0, FILTER_OTHER);	\
	if (ret)							\
		return ret;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
				 offsetof(typeof(field), __data_loc_##item),   \
				 sizeof(field.__data_loc_##item), 0,	       \
				 FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
static int								\
ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
{									\
	struct ftrace_raw_##call field;					\
	int ret;							\
									\
	ret = trace_define_common_fields(event_call);			\
	if (ret)							\
		return ret;						\
									\
	tstruct;							\
									\
	return ret;							\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= (len * sizeof(type)) << 16;		\
	__data_size += (len) * sizeof(type);

#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static inline int ftrace_get_offsets_##call(				\
	struct ftrace_data_offsets_##call *__data_offsets, proto)	\
{									\
	int __data_size = 0;						\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}
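
/*
 * Worked example with hypothetical sizes: for __string(bar, src) with
 * strlen(src) == 7, and __data starting 24 bytes into the event:
 *
 *	__data_offsets.bar = (8 << 16) | 24;	<- len 7 plus NUL, offset 24
 *	__data_size = 8;
 *
 * so the caller reserves sizeof(*entry) + 8 bytes in the ring buffer.
 */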

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#ifdef CONFIG_EVENT_PROFILE

/*
 * Generate the functions needed for tracepoint perf_event support.
 *
 * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later.
 *
 * static int ftrace_profile_enable_<call>(void)
 * {
 *	return register_trace_<call>(ftrace_profile_<call>);
 * }
 *
 * static void ftrace_profile_disable_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_profile_<call>);
 * }
 *
 */

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)			\
									\
static void ftrace_profile_##name(proto);				\
									\
static int ftrace_profile_enable_##name(struct ftrace_event_call *unused)\
{									\
	return register_trace_##name(ftrace_profile_##name);		\
}									\
									\
static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\
{									\
	unregister_trace_##name(ftrace_profile_##name);			\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#endif /* CONFIG_EVENT_PROFILE */

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * static void ftrace_event_<call>(proto)
 * {
 *	event_trace_printk(_RET_IP_, "<call>: " <fmt>);
 * }
 *
 * static int ftrace_reg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	unregister_trace_<call>(ftrace_event_<call>);
 * }
 *
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(proto)
 * {
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry;	<-- defined in stage 1
 *	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	event = trace_current_buffer_lock_reserve(&buffer,
 *				  event_<call>.id,
 *				  sizeof(struct ftrace_raw_<call>),
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *
 *	<assign>;	<-- Here we assign the entries by the __field and
 *			    __array macros.
 *
 *	trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_raw_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	unregister_trace_<call>(ftrace_raw_event_<call>);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace		= ftrace_raw_output_<call>,	<-- stage 3
 * };
 *
 * static int ftrace_raw_init_event_<call>(struct ftrace_event_call *unused)
 * {
 *	int id;
 *
 *	id = register_ftrace_event(&ftrace_event_type_<call>);
 *	if (!id)
 *		return -ENODEV;
 *	event_<call>.id = id;
 *	return 0;
 * }
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.name		= "<call>",
 *	.system		= "<system>",
 *	.raw_init	= ftrace_raw_init_event_<call>,
 *	.regfunc	= ftrace_reg_event_<call>,
 *	.unregfunc	= ftrace_unreg_event_<call>,
 *	.show_format	= ftrace_format_<call>,
 * }
 *
 */

#undef TP_FMT
#define TP_FMT(fmt, args...)	fmt "\n", ##args

#ifdef CONFIG_EVENT_PROFILE

#define _TRACE_PROFILE_INIT(call)					\
	.profile_count = ATOMIC_INIT(-1),				\
	.profile_enable = ftrace_profile_enable_##call,			\
	.profile_disable = ftrace_profile_disable_##call,

#else
#define _TRACE_PROFILE_INIT(call)
#endif /* CONFIG_EVENT_PROFILE */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), src);
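
/*
 * Illustrative use (hypothetical fields): inside TP_fast_assign() one
 * writes
 *
 *	TP_fast_assign(
 *		__entry->foo = foo;
 *		__assign_str(bar, name);
 *	)
 *
 * __assign_str() copies the string into the slot that
 * ftrace_get_offsets_<call>() reserved for the __string() field.
 */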

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
									\
static void ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
				       proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	struct ring_buffer *buffer;					\
	unsigned long irq_flags;					\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	event = trace_current_buffer_lock_reserve(&buffer,		\
				 event_call->id,			\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry = ring_buffer_event_data(event);				\
									\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
		trace_nowake_buffer_unlock_commit(buffer,		\
						  event, irq_flags, pc); \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
									\
static void ftrace_raw_event_##call(proto)				\
{									\
	ftrace_raw_event_id_##template(&event_##call, args);		\
}									\
									\
static int ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)\
{									\
	int ret;							\
									\
	ret = register_trace_##call(ftrace_raw_event_##call);		\
	if (ret)							\
		pr_info("event trace: Could not activate trace point "	\
			"probe to " #call "\n");			\
	return ret;							\
}									\
									\
static void ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused)\
{									\
	unregister_trace_##call(ftrace_raw_event_##call);		\
}									\
									\
static struct trace_event ftrace_event_type_##call = {			\
	.trace			= ftrace_raw_output_##call,		\
};									\
									\
static int ftrace_raw_init_event_##call(struct ftrace_event_call *unused)\
{									\
	int id;								\
									\
	id = register_ftrace_event(&ftrace_event_type_##call);		\
	if (!id)							\
		return -ENODEV;						\
	event_##call.id = id;						\
	INIT_LIST_HEAD(&event_##call.fields);				\
	return 0;							\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)		\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.system			= __stringify(TRACE_SYSTEM),		\
	.event			= &ftrace_event_type_##call,		\
	.raw_init		= ftrace_raw_init_event_##call,		\
	.regfunc		= ftrace_raw_reg_event_##call,		\
	.unregfunc		= ftrace_raw_unreg_event_##call,	\
	.show_format		= ftrace_format_##template,		\
	.define_fields		= ftrace_define_fields_##template,	\
	_TRACE_PROFILE_INIT(call)					\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.system			= __stringify(TRACE_SYSTEM),		\
	.event			= &ftrace_event_type_##call,		\
	.raw_init		= ftrace_raw_init_event_##call,		\
	.regfunc		= ftrace_raw_reg_event_##call,		\
	.unregfunc		= ftrace_raw_unreg_event_##call,	\
	.show_format		= ftrace_format_##call,			\
	.define_fields		= ftrace_define_fields_##template,	\
	_TRACE_PROFILE_INIT(call)					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Define the insertion callback to profile events
 *
 * The job is very similar to ftrace_raw_event_<call> except that we don't
 * insert into the ring buffer but into a perf buffer.
 *
 * static void ftrace_profile_<call>(proto)
 * {
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ftrace_event_call *event_call = &event_<call>;
 *	extern void perf_tp_event(int, u64, u64, void *, int);
 *	struct ftrace_raw_<call> *entry;
 *	u64 __addr = 0, __count = 1;
 *	unsigned long irq_flags;
 *	struct trace_entry *ent;
 *	int __entry_size;
 *	int __data_size;
 *	char *trace_buf;
 *	char *raw_data;
 *	int __cpu;
 *	int rctx;
 *	int pc;
 *
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	// Below we want to get the aligned size by taking into account
 *	// the u32 field that will later store the buffer size
 *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
 *			     sizeof(u64));
 *	__entry_size -= sizeof(u32);
 *
 *	// Protect the non nmi buffer
 *	// This also protects the rcu read side
 *	local_irq_save(irq_flags);
 *
 *	// Avoid recursion from perf that could mess up the buffer
 *	rctx = perf_swevent_get_recursion_context();
 *	if (rctx < 0)
 *		goto end_recursion;
 *
 *	__cpu = smp_processor_id();
 *
 *	if (in_nmi())
 *		trace_buf = rcu_dereference(perf_trace_buf_nmi);
 *	else
 *		trace_buf = rcu_dereference(perf_trace_buf);
 *
 *	if (!trace_buf)
 *		goto end;
 *
 *	raw_data = per_cpu_ptr(trace_buf, __cpu);
 *
 *	// Zero dead bytes from alignment to avoid stack leak to userspace:
 *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 *	entry = (struct ftrace_raw_<call> *)raw_data;
 *	ent = &entry->ent;
 *	tracing_generic_entry_update(ent, irq_flags, pc);
 *	ent->type = event_call->id;
 *
 *	<tstruct>	<- store the offsets of the dynamic arrays
 *
 *	<assign>	<- assign the entry fields
 *
 *	perf_tp_event(event_call->id, __addr, __count, entry,
 *		      __entry_size);	<- submit the event to perf
 *
 * end:
 *	perf_swevent_put_recursion_context(rctx);
 * end_recursion:
 *	local_irq_restore(irq_flags);
 * }
 */
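
/*
 * Worked example of the size arithmetic above, with hypothetical sizes
 * sizeof(*entry) == 16 and __data_size == 13:
 *
 *	ALIGN(13 + 16 + 4, 8) - 4 == ALIGN(33, 8) - 4 == 40 - 4 == 36
 *
 * i.e. after perf prepends its u32 size field, the record as a whole
 * stays u64-aligned.
 */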

#ifdef CONFIG_EVENT_PROFILE

#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static void								\
ftrace_profile_templ_##call(struct ftrace_event_call *event_call,	\
			    proto)					\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	extern int perf_swevent_get_recursion_context(void);		\
	extern void perf_swevent_put_recursion_context(int rctx);	\
	extern void perf_tp_event(int, u64, u64, void *, int);		\
	struct ftrace_raw_##call *entry;				\
	u64 __addr = 0, __count = 1;					\
	unsigned long irq_flags;					\
	struct trace_entry *ent;					\
	int __entry_size;						\
	int __data_size;						\
	char *trace_buf;						\
	char *raw_data;							\
	int __cpu;							\
	int rctx;							\
	int pc;								\
									\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE,		\
		      "profile buffer not large enough"))		\
		return;							\
									\
	local_irq_save(irq_flags);					\
									\
	rctx = perf_swevent_get_recursion_context();			\
	if (rctx < 0)							\
		goto end_recursion;					\
									\
	__cpu = smp_processor_id();					\
									\
	if (in_nmi())							\
		trace_buf = rcu_dereference(perf_trace_buf_nmi);	\
	else								\
		trace_buf = rcu_dereference(perf_trace_buf);		\
									\
	if (!trace_buf)							\
		goto end;						\
									\
	raw_data = per_cpu_ptr(trace_buf, __cpu);			\
									\
	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;	\
	entry = (struct ftrace_raw_##call *)raw_data;			\
	ent = &entry->ent;						\
	tracing_generic_entry_update(ent, irq_flags, pc);		\
	ent->type = event_call->id;					\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	perf_tp_event(event_call->id, __addr, __count, entry,		\
		      __entry_size);					\
									\
end:									\
	perf_swevent_put_recursion_context(rctx);			\
end_recursion:								\
	local_irq_restore(irq_flags);					\
									\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static void ftrace_profile_##call(proto)				\
{									\
	struct ftrace_event_call *event_call = &event_##call;		\
									\
	ftrace_profile_templ_##template(event_call, args);		\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_EVENT_PROFILE */

#undef _TRACE_PROFILE_INIT