#ifndef _LINUX_FTRACE_EVENT_H
#define _LINUX_FTRACE_EVENT_H

#include <linux/ring_buffer.h>
#include <linux/trace_seq.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/tracepoint.h>
struct trace_print_flags {
	unsigned long		mask;
	const char		*name;
};

struct trace_print_flags_u64 {
	unsigned long long	mask;
	const char		*name;
};
const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
				   unsigned long flags,
				   const struct trace_print_flags *flag_array);

const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
				     const struct trace_print_flags *symbol_array);
#if BITS_PER_LONG == 32
const char *ftrace_print_symbols_seq_u64(struct trace_seq *p,
					 unsigned long long val,
					 const struct trace_print_flags_u64
								 *symbol_array);
#endif

const char *ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
				     unsigned int bitmask_size);
const char *ftrace_print_hex_seq(struct trace_seq *p,
				 const unsigned char *buf, int len);

const char *ftrace_print_array_seq(struct trace_seq *p,
				   const void *buf, int buf_len,
				   size_t el_size);

struct trace_iterator;
struct trace_event;

int ftrace_raw_output_prep(struct trace_iterator *iter,
			   struct trace_event *event);
/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned short		type;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
};

#define FTRACE_MAX_EVENT						\
	((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
/*
 * Trace iterator - used by printout routines that present trace
 * results to users; these routines might sleep, etc.
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct trace_buffer	*trace_buffer;
	struct ring_buffer_iter	**buffer_iter;
	unsigned long		iter_flags;

	/* trace_seq for __print_flags() and __print_symbolic() etc. */
	struct trace_seq	tmp_seq;

	cpumask_var_t		started;

	/* true when the currently open file is a snapshot */
	bool			snapshot;

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	unsigned long		lost_events;

	/* All new fields here will be zeroed out in pipe_read */
};
enum trace_iter_flags {
	TRACE_FILE_LAT_FMT	= 1,
	TRACE_FILE_ANNOTATE	= 2,
	TRACE_FILE_TIME_IN_NS	= 4,
};
typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
					      int flags, struct trace_event *event);

struct trace_event_functions {
	trace_print_func	trace;
	trace_print_func	raw;
	trace_print_func	hex;
	trace_print_func	binary;
};

struct trace_event {
	struct hlist_node		node;
	struct list_head		list;
	int				type;
	struct trace_event_functions	*funcs;
};
extern int register_ftrace_event(struct trace_event *event);
extern int unregister_ftrace_event(struct trace_event *event);

/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
};
/*
 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
 * simplifies those functions and keeps them in sync.
 */
static inline enum print_line_t trace_handle_return(struct trace_seq *s)
{
	return trace_seq_has_overflowed(s) ?
		TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
}
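/*
 * Illustrative sketch only (not part of the original header): how a
 * trace_print_func callback might use trace_handle_return().  The
 * callback name and the string it prints are hypothetical.
 */
static inline enum print_line_t
example_print_line(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;

	trace_seq_puts(s, "example event\n");

	/* PARTIAL_LINE if the seq overflowed, HANDLED otherwise. */
	return trace_handle_return(s);
}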
void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);
struct ftrace_event_file;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
				struct ftrace_event_file *ftrace_file,
				int type, unsigned long len,
				unsigned long flags, int pc);
struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
				  int type, unsigned long len,
				  unsigned long flags, int pc);
void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc);
void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc);
void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs);
void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event);

void tracing_record_cmdline(struct task_struct *tsk);

int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);
enum trace_reg {
	TRACE_REG_REGISTER,
	TRACE_REG_UNREGISTER,
#ifdef CONFIG_PERF_EVENTS
	TRACE_REG_PERF_REGISTER,
	TRACE_REG_PERF_UNREGISTER,
	TRACE_REG_PERF_OPEN,
	TRACE_REG_PERF_CLOSE,
	TRACE_REG_PERF_ADD,
	TRACE_REG_PERF_DEL,
#endif
};

struct ftrace_event_call;
struct ftrace_event_class {
	const char		*system;
	void			*probe;
#ifdef CONFIG_PERF_EVENTS
	void			*perf_probe;
#endif
	int			(*reg)(struct ftrace_event_call *event,
				       enum trace_reg type, void *data);
	int			(*define_fields)(struct ftrace_event_call *);
	struct list_head	*(*get_fields)(struct ftrace_event_call *);
	struct list_head	fields;
	int			(*raw_init)(struct ftrace_event_call *);
};
extern int ftrace_event_reg(struct ftrace_event_call *event,
			    enum trace_reg type, void *data);

int ftrace_output_event(struct trace_iterator *iter, struct ftrace_event_call *event,
			char *fmt, ...);

int ftrace_event_define_field(struct ftrace_event_call *call,
			      char *type, int len, char *item, int offset,
			      int field_size, int sign, int filter);
struct ftrace_event_buffer {
	struct ring_buffer		*buffer;
	struct ring_buffer_event	*event;
	struct ftrace_event_file	*ftrace_file;
	void				*entry;
	unsigned long			flags;
	int				pc;
};

void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer,
				  struct ftrace_event_file *ftrace_file,
				  unsigned long len);

void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer);

int ftrace_event_define_field(struct ftrace_event_call *call,
			      char *type, int len, char *item, int offset,
			      int field_size, int sign, int filter);
enum {
	TRACE_EVENT_FL_FILTERED_BIT,
	TRACE_EVENT_FL_CAP_ANY_BIT,
	TRACE_EVENT_FL_NO_SET_FILTER_BIT,
	TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
	TRACE_EVENT_FL_WAS_ENABLED_BIT,
	TRACE_EVENT_FL_USE_CALL_FILTER_BIT,
	TRACE_EVENT_FL_TRACEPOINT_BIT,
};
/*
 * Event flags:
 *  FILTERED	  - The event has a filter attached
 *  CAP_ANY	  - Any user can enable for perf
 *  NO_SET_FILTER - Set when the filter has an error and is to be ignored
 *  IGNORE_ENABLE - For ftrace internal events, do not enable with debugfs file
 *  WAS_ENABLED   - Set and stays set when an event was ever enabled
 *                    (used for module unloading; if a module event is enabled,
 *                     it is best to clear the buffers that used it).
 *  USE_CALL_FILTER - For ftrace internal events, don't use file filter
 *  TRACEPOINT    - Event is a tracepoint
 */
enum {
	TRACE_EVENT_FL_FILTERED		= (1 << TRACE_EVENT_FL_FILTERED_BIT),
	TRACE_EVENT_FL_CAP_ANY		= (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
	TRACE_EVENT_FL_NO_SET_FILTER	= (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
	TRACE_EVENT_FL_IGNORE_ENABLE	= (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
	TRACE_EVENT_FL_WAS_ENABLED	= (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT),
	TRACE_EVENT_FL_USE_CALL_FILTER	= (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT),
	TRACE_EVENT_FL_TRACEPOINT	= (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
};
struct ftrace_event_call {
	struct list_head	list;
	struct ftrace_event_class *class;
	union {
		char			*name;
		/* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
		struct tracepoint	*tp;
	};
	struct trace_event	event;
	const char		*print_fmt;
	struct event_filter	*filter;

	/*
	 *   bit 0:	filter_active
	 *   bit 1:	allow trace by non root (cap any)
	 *   bit 2:	failed to apply filter
	 *   bit 3:	ftrace internal event (do not enable)
	 *   bit 4:	Event was enabled by module
	 *   bit 5:	use call filter rather than file filter
	 *   bit 6:	Event is a tracepoint
	 */
	int			flags; /* static flags of different events */

#ifdef CONFIG_PERF_EVENTS
	struct hlist_head __percpu	*perf_events;

	int	(*perf_perm)(struct ftrace_event_call *,
			     struct perf_event *);
#endif
};
static inline const char *
ftrace_event_name(struct ftrace_event_call *call)
{
	if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
		return call->tp ? call->tp->name : NULL;
	else
		return call->name;
}
struct ftrace_subsystem_dir;

enum {
	FTRACE_EVENT_FL_ENABLED_BIT,
	FTRACE_EVENT_FL_RECORDED_CMD_BIT,
	FTRACE_EVENT_FL_FILTERED_BIT,
	FTRACE_EVENT_FL_NO_SET_FILTER_BIT,
	FTRACE_EVENT_FL_SOFT_MODE_BIT,
	FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
	FTRACE_EVENT_FL_TRIGGER_MODE_BIT,
	FTRACE_EVENT_FL_TRIGGER_COND_BIT,
};
/*
 * Ftrace event file flags:
 *  ENABLED	  - The event is enabled
 *  RECORDED_CMD  - The comms should be recorded at sched_switch
 *  FILTERED	  - The event has a filter attached
 *  NO_SET_FILTER - Set when the filter has an error and is to be ignored
 *  SOFT_MODE     - The event is enabled/disabled by SOFT_DISABLED
 *  SOFT_DISABLED - When set, do not trace the event (even though its
 *                   tracepoint may be enabled)
 *  TRIGGER_MODE  - When set, invoke the triggers associated with the event
 *  TRIGGER_COND  - When set, one or more triggers have an associated filter
 */
enum {
	FTRACE_EVENT_FL_ENABLED		= (1 << FTRACE_EVENT_FL_ENABLED_BIT),
	FTRACE_EVENT_FL_RECORDED_CMD	= (1 << FTRACE_EVENT_FL_RECORDED_CMD_BIT),
	FTRACE_EVENT_FL_FILTERED	= (1 << FTRACE_EVENT_FL_FILTERED_BIT),
	FTRACE_EVENT_FL_NO_SET_FILTER	= (1 << FTRACE_EVENT_FL_NO_SET_FILTER_BIT),
	FTRACE_EVENT_FL_SOFT_MODE	= (1 << FTRACE_EVENT_FL_SOFT_MODE_BIT),
	FTRACE_EVENT_FL_SOFT_DISABLED	= (1 << FTRACE_EVENT_FL_SOFT_DISABLED_BIT),
	FTRACE_EVENT_FL_TRIGGER_MODE	= (1 << FTRACE_EVENT_FL_TRIGGER_MODE_BIT),
	FTRACE_EVENT_FL_TRIGGER_COND	= (1 << FTRACE_EVENT_FL_TRIGGER_COND_BIT),
};
struct ftrace_event_file {
	struct list_head		list;
	struct ftrace_event_call	*event_call;
	struct event_filter		*filter;
	struct trace_array		*tr;
	struct ftrace_subsystem_dir	*system;
	struct list_head		triggers;

	/*
	 * 32 bit flags:
	 *   bit 0:	enabled
	 *   bit 1:	enabled cmd record
	 *   bit 2:	enable/disable with the soft disable bit
	 *   bit 3:	soft disabled
	 *   bit 4:	trigger enabled
	 *
	 * Note: The bits must be set atomically to prevent races
	 * from other writers. Reads of flags do not need to be in
	 * sync as they occur in critical sections. But the way flags
	 * is currently used, these changes do not affect the code
	 * except that when a change is made, it may have a slight
	 * delay in propagating the changes to other CPUs due to
	 * caching and such. Which is mostly OK ;-)
	 */
	unsigned long		flags;
	atomic_t		sm_ref;	/* soft-mode reference counter */
	atomic_t		tm_ref;	/* trigger-mode reference counter */
};
#define __TRACE_EVENT_FLAGS(name, value)				\
	static int __init trace_init_flags_##name(void)		\
	{								\
		event_##name.flags |= value;				\
		return 0;						\
	}								\
	early_initcall(trace_init_flags_##name);

#define __TRACE_EVENT_PERF_PERM(name, expr...)				\
	static int perf_perm_##name(struct ftrace_event_call *tp_event, \
				    struct perf_event *p_event)		\
	{								\
		return ({ expr; });					\
	}								\
	static int __init trace_init_perf_perm_##name(void)		\
	{								\
		event_##name.perf_perm = &perf_perm_##name;		\
		return 0;						\
	}								\
	early_initcall(trace_init_perf_perm_##name);
#define PERF_MAX_TRACE_SIZE	2048

#define MAX_FILTER_STR_VAL	256	/* Should handle KSYM_SYMBOL_LEN */

enum event_trigger_type {
	ETT_NONE		= (0),
	ETT_TRACE_ONOFF		= (1 << 0),
	ETT_SNAPSHOT		= (1 << 1),
	ETT_STACKTRACE		= (1 << 2),
	ETT_EVENT_ENABLE	= (1 << 3),
};
extern int filter_match_preds(struct event_filter *filter, void *rec);

extern int filter_check_discard(struct ftrace_event_file *file, void *rec,
				struct ring_buffer *buffer,
				struct ring_buffer_event *event);
extern int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event);
extern enum event_trigger_type event_triggers_call(struct ftrace_event_file *file,
						    void *rec);
extern void event_triggers_post_call(struct ftrace_event_file *file,
				     enum event_trigger_type tt);
/**
 * ftrace_trigger_soft_disabled - do triggers and test if soft disabled
 * @file: The file pointer of the event to test
 *
 * If any triggers without filters are attached to this event, they
 * will be called here. If the event is soft disabled and has no
 * triggers that require testing the fields, it will return true,
 * otherwise false.
 */
static inline bool
ftrace_trigger_soft_disabled(struct ftrace_event_file *file)
{
	unsigned long eflags = file->flags;

	if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {
		if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)
			event_triggers_call(file, NULL);
		if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)
			return true;
	}
	return false;
}
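/*
 * Illustrative sketch only (not part of the original header): the common
 * fast-path guard a probe can place before reserving ring buffer space.
 * The function name and its argument are hypothetical.
 */
static inline void example_probe_guard(struct ftrace_event_file *ftrace_file)
{
	/*
	 * Runs any unconditional triggers and returns true when the event
	 * is soft disabled and nothing needs the recorded fields, so the
	 * probe can bail out before doing any work.
	 */
	if (ftrace_trigger_soft_disabled(ftrace_file))
		return;

	/* ... reserve, fill and commit the event here ... */
}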
/*
 * Helper function for event_trigger_unlock_commit{_regs}().
 * If there are event triggers attached to this event that require
 * filtering against its fields, then they will be called as the
 * entry already holds the field information of the current event.
 *
 * It also checks if the event should be discarded or not.
 * It is to be discarded if the event is soft disabled and the
 * event was only recorded to process triggers, or if the event
 * filter is active and this event did not match the filters.
 *
 * Returns true if the event is discarded, false otherwise.
 */
static inline bool
__event_trigger_test_discard(struct ftrace_event_file *file,
			     struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     void *entry,
			     enum event_trigger_type *tt)
{
	unsigned long eflags = file->flags;

	if (eflags & FTRACE_EVENT_FL_TRIGGER_COND)
		*tt = event_triggers_call(file, entry);

	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags))
		ring_buffer_discard_commit(buffer, event);
	else if (!filter_check_discard(file, entry, buffer, event))
		return false;

	return true;
}
/**
 * event_trigger_unlock_commit - handle triggers and finish event commit
 * @file: The file pointer associated to the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
 * if the event is soft disabled and should be discarded.
 */
static inline void
event_trigger_unlock_commit(struct ftrace_event_file *file,
			    struct ring_buffer *buffer,
			    struct ring_buffer_event *event,
			    void *entry, unsigned long irq_flags, int pc)
{
	enum event_trigger_type tt = ETT_NONE;

	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);

	if (tt)
		event_triggers_post_call(file, tt);
}
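/*
 * Illustrative sketch only (not part of the original header): the usual
 * reserve/fill/commit write path that ends in event_trigger_unlock_commit().
 * "struct example_entry", the event type value and the probe itself are
 * hypothetical; the reserve and commit helpers are the ones declared above.
 */
struct example_entry {
	struct trace_entry	ent;
	unsigned long		value;	/* hypothetical payload */
};

static inline void example_probe(struct ftrace_event_file *ftrace_file,
				 unsigned long value)
{
	struct ring_buffer *buffer;
	struct ring_buffer_event *event;
	struct example_entry *entry;
	unsigned long irq_flags;
	int pc;

	if (ftrace_trigger_soft_disabled(ftrace_file))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
						0 /* hypothetical type */,
						sizeof(*entry), irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->value = value;

	/* Runs triggers and filters, then commits or discards the event. */
	event_trigger_unlock_commit(ftrace_file, buffer, event, entry,
				    irq_flags, pc);
}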
/**
 * event_trigger_unlock_commit_regs - handle triggers and finish event commit
 * @file: The file pointer associated to the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
 * if the event is soft disabled and should be discarded.
 *
 * Same as event_trigger_unlock_commit() but calls
 * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
 */
static inline void
event_trigger_unlock_commit_regs(struct ftrace_event_file *file,
				 struct ring_buffer *buffer,
				 struct ring_buffer_event *event,
				 void *entry, unsigned long irq_flags, int pc,
				 struct pt_regs *regs)
{
	enum event_trigger_type tt = ETT_NONE;

	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
		trace_buffer_unlock_commit_regs(buffer, event,
						irq_flags, pc, regs);

	if (tt)
		event_triggers_post_call(file, tt);
}
enum {
	FILTER_OTHER = 0,
	FILTER_STATIC_STRING,
	FILTER_DYN_STRING,
	FILTER_PTR_STRING,
	FILTER_TRACE_FN,
};
extern int trace_event_raw_init(struct ftrace_event_call *call);
extern int trace_define_field(struct ftrace_event_call *call, const char *type,
			      const char *name, int offset, int size,
			      int is_signed, int filter_type);
extern int trace_add_event_call(struct ftrace_event_call *call);
extern int trace_remove_event_call(struct ftrace_event_call *call);

#define is_signed_type(type)	(((type)(-1)) < (type)1)

int trace_set_clr_event(const char *system, const char *event, int set);
/*
 * The double __builtin_constant_p is because gcc will give us an error
 * if we try to initialize the static variable from fmt when it is not a
 * constant, even though the outer if statement optimizes out.
 */
#define event_trace_printk(ip, fmt, args...)				\
do {									\
	__trace_printk_check_format(fmt, ##args);			\
	tracing_record_cmdline(current);				\
	if (__builtin_constant_p(fmt)) {				\
		static const char *trace_printk_fmt			\
		  __attribute__((section("__trace_printk_fmt"))) =	\
			__builtin_constant_p(fmt) ? fmt : NULL;		\
									\
		__trace_bprintk(ip, trace_printk_fmt, ##args);		\
	} else								\
		__trace_printk(ip, fmt, ##args);			\
} while (0)
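/*
 * Illustrative sketch only (not part of the original header): a caller
 * passes an instruction pointer and a printk-style format to
 * event_trace_printk().  _THIS_IP_ comes from linux/kernel.h; the wrapper
 * function and the message are hypothetical.
 */
static inline void example_event_trace_printk(int value)
{
	event_trace_printk(_THIS_IP_, "example value: %d\n", value);
}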
#ifdef CONFIG_PERF_EVENTS

DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);

extern int  perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);
extern int  perf_trace_add(struct perf_event *event, int flags);
extern void perf_trace_del(struct perf_event *event, int flags);
extern int  ftrace_profile_set_filter(struct perf_event *event, int event_id,
				      char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
extern void *perf_trace_buf_prepare(int size, unsigned short type,
				    struct pt_regs **regs, int *rctxp);

static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
		      u64 count, struct pt_regs *regs, void *head,
		      struct task_struct *task)
{
	perf_tp_event(addr, count, raw_data, size, regs, head, rctx, task);
}
#endif

#endif /* _LINUX_FTRACE_EVENT_H */