/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry	ent;
 *	<type>			<item>;
 *	<type2>			<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */
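
/*
 * As a purely illustrative example (foo_bar is hypothetical, not an
 * event in the tree), the definition
 *
 *	TRACE_EVENT(foo_bar,
 *		TP_PROTO(int bar, const char *name),
 *		TP_ARGS(bar, name),
 *		TP_STRUCT__entry(
 *			__field(int, bar)
 *			__string(name, name)
 *		),
 *		TP_fast_assign(
 *			__entry->bar = bar;
 *			__assign_str(name, name);
 *		),
 *		TP_printk("bar=%d name=%s", __entry->bar, __get_str(name))
 *	);
 *
 * would expand at this stage to roughly:
 *
 *	struct ftrace_raw_foo_bar {
 *		struct trace_entry	ent;
 *		int			bar;
 *		u32			__data_loc_name;
 *		char			__data[0];
 *	};
 *	static struct ftrace_event_call event_foo_bar;
 *
 * Note how __string() reserves only a u32 slot here; the string bytes
 * themselves land in __data[] at trace time.
 */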

#include <linux/ftrace_event.h>

#undef __field
#define __field(type, item)		type	item;

#undef __field_ext
#define __field_ext(type, item, filter_type)	type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {				\
		struct trace_entry	ent;			\
		tstruct						\
		char			__data[0];		\
	};							\
	static struct ftrace_event_call event_##name

#undef __cpparg
#define __cpparg(arg...) arg

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,		\
		assign, print, reg, unreg)			\
	TRACE_EVENT(name, __cpparg(proto), __cpparg(args),	\
		__cpparg(tstruct), __cpparg(assign), __cpparg(print))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)


/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	u32	<item1>;
 *	u32	<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro creates one u32 <item> per dynamic array;
 * it records the offset of that array from the beginning of the event.
 * The size of the array is also encoded, in the upper 16 bits of <item>.
 */

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)	\
	struct ftrace_data_offsets_##call {			\
		tstruct;					\
	};

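/*
 * For the hypothetical foo_bar event sketched in stage 1, this stage
 * would emit just:
 *
 *	struct ftrace_data_offsets_foo_bar {
 *		u32	name;
 *	};
 *
 * one u32 per __string()/__dynamic_array() field, and nothing for the
 * fixed-size __field() entries.
 */
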
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Set up the output format of the trace point.
 *
 * int
 * ftrace_format_##call(struct trace_seq *s)
 * {
 *	struct ftrace_raw_##call field;
 *	int ret;
 *
 *	ret = trace_seq_printf(s, #type " " #item ";"
 *			       " offset:%u; size:%u;\n",
 *			       offsetof(struct ftrace_raw_##call, item),
 *			       sizeof(field.item));
 *
 * }
 */

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef __field
#define __field(type, item)					\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"	\
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item),	\
			       (unsigned int)is_signed_type(type));	\
	if (!ret)						\
		return 0;

#undef __field_ext
#define __field_ext(type, item, filter_type)	__field(type, item)

#undef __array
#define __array(type, item, len)				\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item),	\
			       (unsigned int)is_signed_type(type));	\
	if (!ret)						\
		return 0;

#undef __dynamic_array
#define __dynamic_array(type, item, len)			\
	ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	\
			       (unsigned int)offsetof(typeof(field),	\
					__data_loc_##item),		\
			       (unsigned int)sizeof(field.__data_loc_##item), \
			       (unsigned int)is_signed_type(type));	\
	if (!ret)						\
		return 0;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __entry
#define __entry REC

#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
static int								\
ftrace_format_##call(struct ftrace_event_call *unused,			\
		     struct trace_seq *s)				\
{									\
	struct ftrace_raw_##call field __attribute__((unused));	\
	int ret = 0;							\
									\
	tstruct;							\
									\
	trace_seq_printf(s, "\nprint fmt: " print);			\
									\
	return ret;							\
}
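
/*
 * For the hypothetical foo_bar event above, the generated
 * ftrace_format_foo_bar() would print something along the lines of
 * (offsets, sizes and signedness depend on the architecture; the
 * values here are illustrative only):
 *
 *	field:int bar;	offset:12;	size:4;	signed:1;
 *	field:__data_loc char[] name;	offset:16;	size:4;	signed:1;
 *
 *	print fmt: "bar=%d name=%s", REC->bar, __get_str(name)
 *
 * which is what shows up in the event's "format" file under the
 * tracing debugfs directory.
 */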

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field;	<-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>.id) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	p = &get_cpu_var(ftrace_event_seq);
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, <TP_printk> "\n");
 *	put_cpu();
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)
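
/*
 * So, for the hypothetical foo_bar event, __get_str(name) in TP_printk
 * resolves to
 *
 *	(char *)((void *)field + (field->__data_loc_name & 0xffff))
 *
 * i.e. the low 16 bits of the __data_loc word are the offset of the
 * string from the start of the entry (see the offset packing further
 * down in this file).
 */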

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags __flags[] =	\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, __flags);	\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})
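
/*
 * Usage sketch (hypothetical event and values): inside TP_printk()
 * one can write
 *
 *	TP_printk("state=%s flags=%s",
 *		__print_symbolic(__entry->state,
 *			{ 0, "RUNNING" }, { 1, "SLEEPING" }),
 *		__print_flags(__entry->flags, "|",
 *			{ 1, "URGENT" }, { 2, "PINNED" }))
 *
 * Both helpers format through the per-cpu trace_seq *p set up below.
 */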

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static enum print_line_t						\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags)	\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##call *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p;						\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.id) {				\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	p = &get_cpu_var(ftrace_event_seq);				\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, #call ": " print);			\
	put_cpu();							\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __field_ext
#define __field_ext(type, item, filter_type)				\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), filter_type);	\
	if (ret)							\
		return ret;

#undef __field
#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len)					\
	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);				\
	ret = trace_define_field(event_call, #type "[" #len "]", #item,	\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item), 0, FILTER_OTHER);	\
	if (ret)							\
		return ret;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
				 offsetof(typeof(field), __data_loc_##item), \
				 sizeof(field.__data_loc_##item), 0,	\
				 FILTER_OTHER);				\
	if (ret)							\
		return ret;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
static int								\
ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
{									\
	struct ftrace_raw_##call field;					\
	int ret;							\
									\
	ret = trace_define_common_fields(event_call);			\
	if (ret)							\
		return ret;						\
									\
	tstruct;							\
									\
	return ret;							\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= ((len) * sizeof(type)) << 16;		\
	__data_size += (len) * sizeof(type);
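
/*
 * Worked example (hypothetical foo_bar event, values illustrative):
 * suppose __data[] starts 16 bytes into the entry and "name" is the
 * first dynamic array, copied from a 5-character string. Then:
 *
 *	__data_offsets->name = 0 + 16;		// offset from entry start
 *	__data_offsets->name |= (6 * 1) << 16;	// length incl. NUL
 *	__data_size += 6;
 *
 * leaving 0x00060010: length 6 in the upper 16 bits, offset 16 in the
 * lower 16 bits, which is exactly what __get_dynamic_array() unpacks.
 */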

#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static inline int ftrace_get_offsets_##call(				\
	struct ftrace_data_offsets_##call *__data_offsets, proto)	\
{									\
	int __data_size = 0;						\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#ifdef CONFIG_EVENT_PROFILE

/*
 * Generate the functions needed for tracepoint perf_event support.
 *
 * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later.
 *
 * static int ftrace_profile_enable_<call>(void)
 * {
 *	return register_trace_<call>(ftrace_profile_<call>);
 * }
 *
 * static void ftrace_profile_disable_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_profile_<call>);
 * }
 */

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
									\
static void ftrace_profile_##call(proto);				\
									\
static int ftrace_profile_enable_##call(struct ftrace_event_call *unused)\
{									\
	return register_trace_##call(ftrace_profile_##call);		\
}									\
									\
static void ftrace_profile_disable_##call(struct ftrace_event_call *unused)\
{									\
	unregister_trace_##call(ftrace_profile_##call);			\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#endif /* CONFIG_EVENT_PROFILE */

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * static void ftrace_event_<call>(proto)
 * {
 *	event_trace_printk(_RET_IP_, "<call>: " <fmt>);
 * }
 *
 * static int ftrace_reg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	unregister_trace_<call>(ftrace_event_<call>);
 * }
 *
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(proto)
 * {
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry;	<-- defined in stage 1
 *	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	event = trace_current_buffer_lock_reserve(&buffer,
 *				event_<call>.id,
 *				sizeof(struct ftrace_raw_<call>),
 *				irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *
 *	<assign>;	<-- Here we assign the entries by the __field and
 *			    __array macros.
 *
 *	trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_raw_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_raw_unreg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	unregister_trace_<call>(ftrace_raw_event_<call>);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= ftrace_raw_output_<call>,	<-- stage 3
 * };
 *
 * static int ftrace_raw_init_event_<call>(struct ftrace_event_call *unused)
 * {
 *	int id;
 *
 *	id = register_ftrace_event(&ftrace_event_type_<call>);
 *	if (!id)
 *		return -ENODEV;
 *	event_<call>.id = id;
 *	return 0;
 * }
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.name			= "<call>",
 *	.system			= "<system>",
 *	.raw_init		= ftrace_raw_init_event_<call>,
 *	.regfunc		= ftrace_raw_reg_event_<call>,
 *	.unregfunc		= ftrace_raw_unreg_event_<call>,
 *	.show_format		= ftrace_format_<call>,
 * }
 */

#undef TP_FMT
#define TP_FMT(fmt, args...)	fmt "\n", ##args

#ifdef CONFIG_EVENT_PROFILE

#define _TRACE_PROFILE_INIT(call)					\
	.profile_count = ATOMIC_INIT(-1),				\
	.profile_enable = ftrace_profile_enable_##call,			\
	.profile_disable = ftrace_profile_disable_##call,

#else
#define _TRACE_PROFILE_INIT(call)
#endif

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), src);
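
/*
 * In the hypothetical foo_bar event, TP_fast_assign() would contain
 *
 *	__entry->bar = bar;
 *	__assign_str(name, name);
 *
 * The tstruct pass in the generated function below runs first and
 * stores the packed offset/length into __entry->__data_loc_name, so
 * __get_str(name) points into __data[] and strcpy() fills in the bytes.
 */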

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
									\
static struct ftrace_event_call event_##call;				\
									\
static void ftrace_raw_event_##call(proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_call *event_call = &event_##call;		\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	struct ring_buffer *buffer;					\
	unsigned long irq_flags;					\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	event = trace_current_buffer_lock_reserve(&buffer,		\
				 event_##call.id,			\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry = ring_buffer_event_data(event);				\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
		trace_nowake_buffer_unlock_commit(buffer,		\
						  event, irq_flags, pc); \
}									\
									\
static int ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)\
{									\
	int ret;							\
									\
	ret = register_trace_##call(ftrace_raw_event_##call);		\
	if (ret)							\
		pr_info("event trace: Could not activate trace point "	\
			"probe to " #call "\n");			\
	return ret;							\
}									\
									\
static void ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused)\
{									\
	unregister_trace_##call(ftrace_raw_event_##call);		\
}									\
									\
static struct trace_event ftrace_event_type_##call = {			\
	.trace			= ftrace_raw_output_##call,		\
};									\
									\
static int ftrace_raw_init_event_##call(struct ftrace_event_call *unused)\
{									\
	int id;								\
									\
	id = register_ftrace_event(&ftrace_event_type_##call);		\
	if (!id)							\
		return -ENODEV;						\
	event_##call.id = id;						\
	INIT_LIST_HEAD(&event_##call.fields);				\
	return 0;							\
}									\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.system			= __stringify(TRACE_SYSTEM),		\
	.event			= &ftrace_event_type_##call,		\
	.raw_init		= ftrace_raw_init_event_##call,		\
	.regfunc		= ftrace_raw_reg_event_##call,		\
	.unregfunc		= ftrace_raw_unreg_event_##call,	\
	.show_format		= ftrace_format_##call,		\
	.define_fields		= ftrace_define_fields_##call,		\
	_TRACE_PROFILE_INIT(call)					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Define the insertion callback to profile events
 *
 * The job is very similar to ftrace_raw_event_<call> except that we don't
 * insert in the ring buffer but in a perf counter.
 *
 * static void ftrace_profile_<call>(proto)
 * {
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ftrace_event_call *event_call = &event_<call>;
 *	extern void perf_tp_event(int, u64, u64, void *, int);
 *	struct ftrace_raw_<call> *entry;
 *	u64 __addr = 0, __count = 1;
 *	unsigned long irq_flags;
 *	struct trace_entry *ent;
 *	int __entry_size;
 *	int __data_size;
 *	char *raw_data;
 *	int __cpu;
 *	int pc;
 *
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	// Below we want to get the aligned size by taking into account
 *	// the u32 field that will later store the buffer size
 *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
 *			     sizeof(u64));
 *	__entry_size -= sizeof(u32);
 *
 *	// Disabling interrupts protects the non-NMI buffer;
 *	// this also protects the RCU read side
 *	local_irq_save(irq_flags);
 *	__cpu = smp_processor_id();
 *
 *	if (in_nmi())
 *		raw_data = rcu_dereference(trace_profile_buf_nmi);
 *	else
 *		raw_data = rcu_dereference(trace_profile_buf);
 *
 *	if (!raw_data)
 *		goto end;
 *
 *	raw_data = per_cpu_ptr(raw_data, __cpu);
 *
 *	// zero dead bytes from alignment to avoid stack leak to userspace:
 *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 *	entry = (struct ftrace_raw_<call> *)raw_data;
 *	ent = &entry->ent;
 *	tracing_generic_entry_update(ent, irq_flags, pc);
 *	ent->type = event_call->id;
 *
 *	<tstruct>	<- initialize the dynamic-array offsets
 *
 *	<assign>	<- assign the field values
 *
 *	perf_tp_event(event_call->id, __addr, __count, entry,
 *		      __entry_size);	<- submit them to perf counter
 *
 * end:
 *	local_irq_restore(irq_flags);
 * }
 */
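
/*
 * Worked example of the size rounding above (illustrative numbers):
 * with sizeof(*entry) == 12 and __data_size == 7,
 *
 *	__entry_size = ALIGN(7 + 12 + 4, 8) - 4 = 24 - 4 = 20
 *
 * so the entry size plus the u32 size field accounted for above
 * (4 + 20 = 24) stays u64 aligned.
 */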

#ifdef CONFIG_EVENT_PROFILE

#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static void ftrace_profile_##call(proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_call *event_call = &event_##call;		\
	extern void perf_tp_event(int, u64, u64, void *, int);		\
	struct ftrace_raw_##call *entry;				\
	u64 __addr = 0, __count = 1;					\
	unsigned long irq_flags;					\
	struct trace_entry *ent;					\
	int __entry_size;						\
	int __data_size;						\
	char *raw_data;							\
	int __cpu;							\
	int pc;								\
									\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE,		\
		      "profile buffer not large enough"))		\
		return;							\
									\
	local_irq_save(irq_flags);					\
	__cpu = smp_processor_id();					\
									\
	if (in_nmi())							\
		raw_data = rcu_dereference(trace_profile_buf_nmi);	\
	else								\
		raw_data = rcu_dereference(trace_profile_buf);		\
									\
	if (!raw_data)							\
		goto end;						\
									\
	raw_data = per_cpu_ptr(raw_data, __cpu);			\
									\
	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;	\
	entry = (struct ftrace_raw_##call *)raw_data;			\
	ent = &entry->ent;						\
	tracing_generic_entry_update(ent, irq_flags, pc);		\
	ent->type = event_call->id;					\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	perf_tp_event(event_call->id, __addr, __count, entry,		\
		      __entry_size);					\
									\
end:									\
	local_irq_restore(irq_flags);					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_EVENT_PROFILE */

#undef _TRACE_PROFILE_INIT
