include/trace/ftrace.h
/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry	ent;
 *	<type>			<item>;
 *	<type2>			<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */

#include <linux/ftrace_event.h>

#undef __field
#define __field(type, item)		type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) unsigned short __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {				\
		struct trace_entry	ent;			\
		tstruct						\
		char			__data[0];		\
	};							\
	static struct ftrace_event_call event_##name

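/*
 * As an illustrative sketch only (the event "sample" and its fields are
 * invented, not part of this file): a definition such as
 *
 *	TRACE_EVENT(sample, ...,
 *		TP_STRUCT__entry(
 *			__field(int, foo)
 *			__array(char, bar, 16)
 *		), ...)
 *
 * would expand at this stage to:
 *
 *	struct ftrace_raw_sample {
 *		struct trace_entry	ent;
 *		int			foo;
 *		char			bar[16];
 *		char			__data[0];
 *	};
 *	static struct ftrace_event_call event_sample;
 */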
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)


/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	int				<item1>;
 *	int				<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro creates each int <item>; these fields hold
 * the offset of each dynamic array from the beginning of the event.
 */

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	int item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)	\
	struct ftrace_data_offsets_##call {			\
		tstruct;					\
	};

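/*
 * Continuing the hypothetical "sample" event (names invented for
 * illustration): a __string(msg, src) field would expand here to
 *
 *	struct ftrace_data_offsets_sample {
 *		int	msg;
 *	};
 *
 * while __field() and __array() entries contribute nothing, since their
 * offsets are fixed at compile time.
 */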
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Set up the format used to display the trace point.
 *
 * int
 * ftrace_format_##call(struct trace_seq *s)
 * {
 *	struct ftrace_raw_##call field;
 *	int ret;
 *
 *	ret = trace_seq_printf(s, #type " " #item ";"
 *			       " offset:%u; size:%u;\n",
 *			       offsetof(struct ftrace_raw_##call, item),
 *			       sizeof(field.item));
 *
 *	return ret;
 * }
 */

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef __field
#define __field(type, item)						\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"	\
			       "offset:%u;\tsize:%u;\n",		\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item));	\
	if (!ret)							\
		return 0;

#undef __array
#define __array(type, item, len)					\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
			       "offset:%u;\tsize:%u;\n",		\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item));	\
	if (!ret)							\
		return 0;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_seq_printf(s, "\tfield:__data_loc " #item ";\t"	       \
			       "offset:%u;\tsize:%u;\n",		       \
			       (unsigned int)offsetof(typeof(field),	       \
						      __data_loc_##item),      \
			       (unsigned int)sizeof(field.__data_loc_##item)); \
	if (!ret)							       \
		return 0;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __entry
#define __entry REC

#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
static int								\
ftrace_format_##call(struct trace_seq *s)				\
{									\
	struct ftrace_raw_##call field __attribute__((unused));	\
	int ret = 0;							\
									\
	tstruct;							\
									\
	trace_seq_printf(s, "\nprint fmt: " print);			\
									\
	return ret;							\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

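/*
 * A rough illustration (field names invented): for the hypothetical
 * "sample" event above, the format file produced by this function would
 * contain lines like
 *
 *	field:int foo;	offset:12;	size:4;
 *	field:char bar[16];	offset:16;	size:16;
 *
 * The offsets follow the common trace_entry header and depend on the
 * architecture, so the numbers here are only indicative.
 */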
/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field; <-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>.id) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	p = &get_cpu_var(ftrace_event_seq);
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, <TP_printk> "\n");
 *	put_cpu();
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + __entry->__data_loc_##field)

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags flags[] =		\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, flags);		\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})

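/*
 * Usage sketch (flag values invented): a TP_printk() can render a
 * bitmask through __print_flags(), e.g.
 *
 *	__print_flags(__entry->flags, "|",
 *		      { 0x1, "READ" }, { 0x2, "WRITE" })
 *
 * which prints "READ|WRITE" when both bits are set. Note that both
 * helpers rely on the per-cpu trace_seq "p" set up by
 * ftrace_raw_output_<call>() below.
 */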
#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
enum print_line_t							\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags)	\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##call *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p;						\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.id) {				\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	p = &get_cpu_var(ftrace_event_seq);				\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, #call ": " print);			\
	put_cpu();							\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __field
#define __field(type, item)						\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item), is_signed_type(type)); \
	if (ret)							\
		return ret;

#undef __array
#define __array(type, item, len)					\
	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);				\
	ret = trace_define_field(event_call, #type "[" #len "]", #item,	\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item), 0);		\
	if (ret)							\
		return ret;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_define_field(event_call, "__data_loc" "[" #type "]", #item,\
				 offsetof(typeof(field), __data_loc_##item),   \
				 sizeof(field.__data_loc_##item), 0);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
int									\
ftrace_define_fields_##call(void)					\
{									\
	struct ftrace_raw_##call field;					\
	struct ftrace_event_call *event_call = &event_##call;		\
	int ret;							\
									\
	__common_field(int, type, 1);					\
	__common_field(unsigned char, flags, 0);			\
	__common_field(unsigned char, preempt_count, 0);		\
	__common_field(int, pid, 1);					\
	__common_field(int, tgid, 1);					\
									\
	tstruct;							\
									\
	return ret;							\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

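/*
 * Sketch of the expansion (names invented): inside
 * ftrace_define_fields_sample(), a __field(int, foo) becomes roughly
 *
 *	ret = trace_define_field(event_call, "int", "foo",
 *				 offsetof(typeof(field), foo),
 *				 sizeof(field.foo), is_signed_type(int));
 *	if (ret)
 *		return ret;
 *
 * which registers the field with the event filter code.
 */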
/*
 * Remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_size += (len) * sizeof(type);

#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static inline int ftrace_get_offsets_##call(				\
	struct ftrace_data_offsets_##call *__data_offsets, proto)	\
{									\
	int __data_size = 0;						\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

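/*
 * Sketch (names invented): with __string(msg, src), the generated
 * ftrace_get_offsets_sample() would in effect do
 *
 *	__data_offsets->msg = __data_size +
 *			      offsetof(typeof(*entry), __data);
 *	__data_size += (strlen(src) + 1) * sizeof(char);
 *
 * so the returned size covers every dynamic array, and each stored
 * offset is relative to the start of the event.
 */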
#ifdef CONFIG_EVENT_PROFILE

/*
 * Generate the functions needed for tracepoint perf_counter support.
 *
 * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later.
 *
 * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
 * {
 *	int ret = 0;
 *
 *	if (!atomic_inc_return(&event_call->profile_count))
 *		ret = register_trace_<call>(ftrace_profile_<call>);
 *
 *	return ret;
 * }
 *
 * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
 * {
 *	if (atomic_add_negative(-1, &event_call->profile_count))
 *		unregister_trace_<call>(ftrace_profile_<call>);
 * }
 *
 */

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
									\
static void ftrace_profile_##call(proto);				\
									\
static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
{									\
	int ret = 0;							\
									\
	if (!atomic_inc_return(&event_call->profile_count))		\
		ret = register_trace_##call(ftrace_profile_##call);	\
									\
	return ret;							\
}									\
									\
static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
{									\
	if (atomic_add_negative(-1, &event_call->profile_count))	\
		unregister_trace_##call(ftrace_profile_##call);		\
}

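/*
 * A note on the counting above: profile_count starts at ATOMIC_INIT(-1)
 * (see _TRACE_PROFILE_INIT further down), so the first enable raises it
 * to 0, atomic_inc_return() returns 0, and the probe gets registered
 * exactly once; the matching disable takes the count negative again and
 * unregisters it.
 */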
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#endif /* CONFIG_EVENT_PROFILE */

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * static void ftrace_event_<call>(proto)
 * {
 *	event_trace_printk(_RET_IP_, "<call>: " <fmt>);
 * }
 *
 * static int ftrace_reg_event_<call>(void)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_event_<call>);
 * }
 *
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(proto)
 * {
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	unsigned long irq_flags;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	event = trace_current_buffer_lock_reserve(event_<call>.id,
 *				  sizeof(struct ftrace_raw_<call>),
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *
 *	<assign>;  <-- Here we assign the entries by the __field and
 *		       __array macros.
 *
 *	trace_current_buffer_unlock_commit(event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(void)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_raw_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_raw_unreg_event_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_raw_event_<call>);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 3
 * };
 *
 * static int ftrace_raw_init_event_<call>(void)
 * {
 *	int id;
 *
 *	id = register_ftrace_event(&ftrace_event_type_<call>);
 *	if (!id)
 *		return -ENODEV;
 *	event_<call>.id = id;
 *	return 0;
 * }
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.name			= "<call>",
 *	.system			= "<system>",
 *	.raw_init		= ftrace_raw_init_event_<call>,
 *	.regfunc		= ftrace_reg_event_<call>,
 *	.unregfunc		= ftrace_unreg_event_<call>,
 *	.show_format		= ftrace_format_<call>,
 * }
 *
 */

#undef TP_FMT
#define TP_FMT(fmt, args...)	fmt "\n", ##args

#ifdef CONFIG_EVENT_PROFILE

#define _TRACE_PROFILE_INIT(call)					\
	.profile_count = ATOMIC_INIT(-1),				\
	.profile_enable = ftrace_profile_enable_##call,			\
	.profile_disable = ftrace_profile_disable_##call,

#else
#define _TRACE_PROFILE_INIT(call)
#endif /* CONFIG_EVENT_PROFILE */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), src);

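/*
 * Sketch of the dynamic-string plumbing (names invented): an event
 * declared with __string(msg, src) typically does, in TP_fast_assign(),
 *
 *	__assign_str(msg, src);
 *
 * which strcpy()s the source into the space reserved past __data, at
 * the offset that ftrace_get_offsets_<call>() computed and that the
 * __dynamic_array() expansion above stored in __entry->__data_loc_msg.
 */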
#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
									\
static struct ftrace_event_call event_##call;				\
									\
static void ftrace_raw_event_##call(proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_call *event_call = &event_##call;		\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	unsigned long irq_flags;					\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	event = trace_current_buffer_lock_reserve(event_##call.id,	\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry = ring_buffer_event_data(event);				\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	if (!filter_current_check_discard(event_call, entry, event))	\
		trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
}									\
									\
static int ftrace_raw_reg_event_##call(void)				\
{									\
	int ret;							\
									\
	ret = register_trace_##call(ftrace_raw_event_##call);		\
	if (ret)							\
		pr_info("event trace: Could not activate trace point "	\
			"probe to " #call "\n");			\
	return ret;							\
}									\
									\
static void ftrace_raw_unreg_event_##call(void)				\
{									\
	unregister_trace_##call(ftrace_raw_event_##call);		\
}									\
									\
static struct trace_event ftrace_event_type_##call = {			\
	.trace			= ftrace_raw_output_##call,		\
};									\
									\
static int ftrace_raw_init_event_##call(void)				\
{									\
	int id;								\
									\
	id = register_ftrace_event(&ftrace_event_type_##call);		\
	if (!id)							\
		return -ENODEV;						\
	event_##call.id = id;						\
	INIT_LIST_HEAD(&event_##call.fields);				\
	init_preds(&event_##call);					\
	return 0;							\
}									\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.system			= __stringify(TRACE_SYSTEM),		\
	.event			= &ftrace_event_type_##call,		\
	.raw_init		= ftrace_raw_init_event_##call,		\
	.regfunc		= ftrace_raw_reg_event_##call,		\
	.unregfunc		= ftrace_raw_unreg_event_##call,	\
	.show_format		= ftrace_format_##call,		\
	.define_fields		= ftrace_define_fields_##call,		\
	_TRACE_PROFILE_INIT(call)					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

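/*
 * Usage sketch (event and arguments invented): once a header defines
 *
 *	TRACE_EVENT(sample, TP_PROTO(int foo), TP_ARGS(foo), ...)
 *
 * and includes this file via TRACE_INCLUDE(), instrumented kernel code
 * simply calls trace_sample(foo); all of the registration, formatting
 * and ring buffer plumbing above is generated from that one definition.
 */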
/*
 * Define the insertion callback to profile events
 *
 * The job is very similar to ftrace_raw_event_<call> except that we don't
 * write to the ring buffer but to a perf counter.
 *
 * static void ftrace_profile_<call>(proto)
 * {
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ftrace_event_call *event_call = &event_<call>;
 *	extern void perf_tpcounter_event(int, u64, u64, void *, int);
 *	struct ftrace_raw_##call *entry;
 *	u64 __addr = 0, __count = 1;
 *	unsigned long irq_flags;
 *	int __entry_size;
 *	int __data_size;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	// Below we want to get the aligned size by taking into account
 *	// the u32 field that will later store the buffer size
 *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
 *			     sizeof(u64));
 *	__entry_size -= sizeof(u32);
 *
 *	do {
 *		char raw_data[__entry_size]; <- allocate our sample on the stack
 *		struct trace_entry *ent;
 *
 *		zero dead bytes from alignment to avoid stack leak to userspace:
 *
 *		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 *		entry = (struct ftrace_raw_<call> *)raw_data;
 *		ent = &entry->ent;
 *		tracing_generic_entry_update(ent, irq_flags, pc);
 *		ent->type = event_call->id;
 *
 *		<tstruct> <- handle the dynamic arrays
 *
 *		<assign>  <- assign our values
 *
 *		perf_tpcounter_event(event_call->id, __addr, __count, entry,
 *			     __entry_size); <- submit them to the perf counter
 *	} while (0);
 *
 * }
 */

#ifdef CONFIG_EVENT_PROFILE

#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static void ftrace_profile_##call(proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_call *event_call = &event_##call;		\
	extern void perf_tpcounter_event(int, u64, u64, void *, int);	\
	struct ftrace_raw_##call *entry;				\
	u64 __addr = 0, __count = 1;					\
	unsigned long irq_flags;					\
	int __entry_size;						\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	do {								\
		char raw_data[__entry_size];				\
		struct trace_entry *ent;				\
									\
		/* zero the dead bytes from align to not leak stack to user */ \
		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;	\
		entry = (struct ftrace_raw_##call *)raw_data;		\
		ent = &entry->ent;					\
		tracing_generic_entry_update(ent, irq_flags, pc);	\
		ent->type = event_call->id;				\
									\
		tstruct							\
									\
		{ assign; }						\
									\
		perf_tpcounter_event(event_call->id, __addr, __count, entry,\
			     __entry_size);				\
	} while (0);							\
									\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_EVENT_PROFILE */

#undef _TRACE_PROFILE_INIT
