#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <asm/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/ftrace.h>
#include <trace/boot.h>
#include <linux/kmemtrace.h>
#include <trace/power.h>

#include <linux/trace_seq.h>
#include <linux/ftrace_event.h>

#ifdef CONFIG_KSYM_TRACER
#include <asm/hw_breakpoint.h>
#endif
enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_SPECIAL,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_BOOT_CALL,
	TRACE_BOOT_RET,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_HW_BRANCHES,
	TRACE_KMEM_ALLOC,
	TRACE_KMEM_FREE,
	TRACE_POWER,
	TRACE_KSYM,

	__TRACE_LAST_TYPE,
};
/*
 * Function trace entry - function address and parent function address:
 */
struct ftrace_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	unsigned long		parent_ip;
};

/* Function call entry */
struct ftrace_graph_ent_entry {
	struct trace_entry		ent;
	struct ftrace_graph_ent		graph_ent;
};

/* Function return entry */
struct ftrace_graph_ret_entry {
	struct trace_entry		ent;
	struct ftrace_graph_ret		ret;
};
extern struct tracer boot_tracer;
/*
 * Context switch trace entry - which task (and prio) we switched from/to:
 */
struct ctx_switch_entry {
	struct trace_entry	ent;
	unsigned int		prev_pid;
	unsigned char		prev_prio;
	unsigned char		prev_state;
	unsigned int		next_pid;
	unsigned char		next_prio;
	unsigned char		next_state;
	unsigned int		next_cpu;
};
/*
 * Special (free-form) trace entry:
 */
struct special_entry {
	struct trace_entry	ent;
	unsigned long		arg1;
	unsigned long		arg2;
	unsigned long		arg3;
};
#define FTRACE_STACK_ENTRIES	8

struct stack_entry {
	struct trace_entry	ent;
	unsigned long		caller[FTRACE_STACK_ENTRIES];
};

struct userstack_entry {
	struct trace_entry	ent;
	unsigned long		caller[FTRACE_STACK_ENTRIES];
};
/*
 * trace_printk entry:
 */
struct bprint_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	const char		*fmt;
	u32			buf[];
};

struct print_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	char			buf[];
};
#define TRACE_OLD_SIZE		88

struct trace_field_cont {
	unsigned char		type;
	/* Temporary till we get rid of this completely */
	char			buf[TRACE_OLD_SIZE - 1];
};
struct trace_mmiotrace_rw {
	struct trace_entry	ent;
	struct mmiotrace_rw	rw;
};

struct trace_mmiotrace_map {
	struct trace_entry	ent;
	struct mmiotrace_map	map;
};

struct trace_boot_call {
	struct trace_entry	ent;
	struct boot_trace_call	boot_call;
};

struct trace_boot_ret {
	struct trace_entry	ent;
	struct boot_trace_ret	boot_ret;
};
#define TRACE_FUNC_SIZE 30
#define TRACE_FILE_SIZE 20
struct trace_branch {
	struct trace_entry	ent;
	unsigned		line;
	char			func[TRACE_FUNC_SIZE+1];
	char			file[TRACE_FILE_SIZE+1];
	char			correct;
};
struct hw_branch_entry {
	struct trace_entry	ent;
	u64			from;
	u64			to;
};

struct trace_power {
	struct trace_entry	ent;
	struct power_trace	state_data;
};
enum kmemtrace_type_id {
	KMEMTRACE_TYPE_KMALLOC = 0,	/* kmalloc() or kfree(). */
	KMEMTRACE_TYPE_CACHE,		/* kmem_cache_*(). */
	KMEMTRACE_TYPE_PAGES,		/* __get_free_pages() and friends. */
};
struct kmemtrace_alloc_entry {
	struct trace_entry	ent;
	enum kmemtrace_type_id	type_id;
	unsigned long		call_site;
	const void		*ptr;
	size_t			bytes_req;
	size_t			bytes_alloc;
	gfp_t			gfp_flags;
	int			node;
};

struct kmemtrace_free_entry {
	struct trace_entry	ent;
	enum kmemtrace_type_id	type_id;
	unsigned long		call_site;
	const void		*ptr;
};
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;
	unsigned long		args[];
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;
	unsigned long		ret;
};
#define KSYM_SELFTEST_ENTRY "ksym_selftest_dummy"
extern int process_new_ksym_entry(char *ksymname, int op, unsigned long addr);

struct ksym_trace_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	unsigned char		type;
	char			ksym_name[KSYM_NAME_LEN];
	char			cmd[TASK_COMM_LEN];
};
/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
};
#define TRACE_BUF_SIZE		1024

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data: (for example which task started
 * the trace).
 */
struct trace_array_cpu {
	atomic_t		disabled;
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	uid_t			uid;
	char			comm[TASK_COMM_LEN];
};
/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct ring_buffer	*buffer;
	unsigned long		entries;
	int			cpu;
	cycle_t			time_start;
	struct task_struct	*waiter;
	struct trace_array_cpu	*data[NR_CPUS];
};
#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}
/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);\
		IF_ASSIGN(var, ent, struct special_entry, 0);		\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
		IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
		IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER);	\
		IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry,	\
			  TRACE_KMEM_ALLOC);				\
		IF_ASSIGN(var, ent, struct kmemtrace_free_entry,	\
			  TRACE_KMEM_FREE);				\
		IF_ASSIGN(var, ent, struct ksym_trace_entry, TRACE_KSYM);\
		__ftrace_bad_type();					\
	} while (0)
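
/*
 * Illustrative sketch (not part of the original header): a print_line-style
 * callback would typically use trace_assign_type() along these lines, with
 * my_print_line() being a hypothetical example:
 *
 *	static enum print_line_t my_print_line(struct trace_iterator *iter)
 *	{
 *		struct trace_entry *entry = iter->ent;
 *		struct ftrace_entry *field;
 *
 *		switch (entry->type) {
 *		case TRACE_FN:
 *			trace_assign_type(field, entry);
 *			trace_seq_printf(&iter->seq, "%lx <- %lx\n",
 *					 field->ip, field->parent_ip);
 *			return TRACE_TYPE_HANDLED;
 *		default:
 *			return TRACE_TYPE_UNHANDLED;
 *		}
 *	}
 */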
/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};
/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b
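
/*
 * Illustrative sketch (not part of the original header): a tracer that wants
 * a private toggle in the trace_options file could define its option table
 * roughly as follows (all names here are hypothetical):
 *
 *	#define MY_OPT_VERBOSE	0x1
 *
 *	static struct tracer_opt my_opts[] = {
 *		{ TRACER_OPT(my_verbose, MY_OPT_VERBOSE) },
 *		{ }
 *	};
 *
 *	static struct tracer_flags my_flags = {
 *		.val  = 0,
 *		.opts = my_opts,
 *	};
 *
 * The empty entry terminates the table. The tracer then points its
 * struct tracer ->flags at &my_flags and reacts to changes in its
 * ->set_flag() callback.
 */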
/**
 * struct tracer - a specific tracer and its callbacks to interact with debugfs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
 * @stop: called when tracing is paused (echo 0 > tracing_enabled)
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @wait_pipe: override how the user waits for traces on trace_pipe
 * @close: called when the trace file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*wait_pipe)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(u32 old_flags, u32 bit, int set);
	struct tracer		*next;
	int			print_max;
	struct tracer_flags	*flags;
	struct tracer_stat	*stats;
};
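
/*
 * Illustrative sketch (not part of the original header): the smallest useful
 * tracer only needs a name plus init/reset callbacks and is registered with
 * register_tracer(), which makes it show up in available_tracers. All names
 * below are hypothetical:
 *
 *	static int my_tracer_init(struct trace_array *tr)
 *	{
 *		return 0;
 *	}
 *
 *	static void my_tracer_reset(struct trace_array *tr)
 *	{
 *	}
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "my_tracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 * A device_initcall (or similar __init hook) would then call
 * register_tracer(&my_tracer).
 */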
#define TRACE_PIPE_ALL_CPU	-1

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
void tracing_reset_online_cpus(struct trace_array *tr);
void tracing_reset_current(int cpu);
void tracing_reset_current_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *trace_create_file(const char *name,
				 mode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

struct dentry *tracing_init_dentry(void);
void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
struct ring_buffer_event;

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags,
			  int pc);
void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc);
struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void default_wait_pipe(struct trace_iterator *iter);
void poll_wait_pipe(struct trace_iterator *iter);
void ftrace(struct trace_array *tr,
	    struct trace_array_cpu *data,
	    unsigned long ip,
	    unsigned long parent_ip,
	    unsigned long flags, int pc);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_special(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long arg1,
		   unsigned long arg2,
		   unsigned long arg3, int pc);
void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void set_graph_array(struct trace_array *tr);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);

extern unsigned long nsecs_to_usecs(unsigned long nsecs);
#ifdef CONFIG_TRACER_MAX_TRACE
extern unsigned long tracing_max_latency;
extern unsigned long tracing_thresh;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */
#ifdef CONFIG_STACKTRACE
void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc);

void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
			    int pc);

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc);
#else
static inline void ftrace_trace_stack(struct trace_array *tr,
				      unsigned long flags, int skip, int pc)
{
}

static inline void ftrace_trace_userstack(struct trace_array *tr,
					  unsigned long flags, int pc)
{
}

static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
				 int skip, int pc)
{
}
#endif /* CONFIG_STACKTRACE */
extern cycle_t ftrace_now(int cpu);
#ifdef CONFIG_CONTEXT_SWITCH_TRACER
typedef void
(*tracer_switch_func_t)(void *private,
			void *__rq,
			struct task_struct *prev,
			struct task_struct *next);

struct tracer_switch_ops {
	tracer_switch_func_t		func;
	void				*private;
	struct tracer_switch_ops	*next;
};
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
extern void trace_find_cmdline(int pid, char comm[]);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#endif

extern int ring_buffer_expanded;
extern bool tracing_selftest_disabled;
DECLARE_PER_CPU(local_t, ftrace_cpu_disabled);
#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_sysprof(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_hw_branches(struct tracer *trace,
					      struct trace_array *tr);
extern int trace_selftest_startup_ksym(struct tracer *trace,
				       struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */
extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(cycle_t nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...);

extern unsigned long trace_flags;

extern int trace_clock_id;
/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern enum print_line_t print_graph_function(struct trace_iterator *iter);
extern enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);

#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS		32
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];

static inline int ftrace_graph_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_count || test_tsk_trace_graph(current))
		return 1;

	for (i = 0; i < ftrace_graph_count; i++) {
		if (addr == ftrace_graph_funcs[i])
			return 1;
	}

	return 0;
}
#else
static inline int ftrace_trace_addr(unsigned long addr)
{
	return 1;
}
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
extern struct pid *ftrace_pid_trace;

#ifdef CONFIG_FUNCTION_TRACER
static inline int ftrace_trace_task(struct task_struct *task)
{
	if (!ftrace_pid_trace)
		return 1;

	return test_tsk_trace_trace(task);
}
#else
static inline int ftrace_trace_task(struct task_struct *task)
{
	return 1;
}
#endif
/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT		= 0x01,
	TRACE_ITER_SYM_OFFSET		= 0x02,
	TRACE_ITER_SYM_ADDR		= 0x04,
	TRACE_ITER_VERBOSE		= 0x08,
	TRACE_ITER_RAW			= 0x10,
	TRACE_ITER_HEX			= 0x20,
	TRACE_ITER_BIN			= 0x40,
	TRACE_ITER_BLOCK		= 0x80,
	TRACE_ITER_STACKTRACE		= 0x100,
	TRACE_ITER_SCHED_TREE		= 0x200,
	TRACE_ITER_PRINTK		= 0x400,
	TRACE_ITER_PREEMPTONLY		= 0x800,
	TRACE_ITER_BRANCH		= 0x1000,
	TRACE_ITER_ANNOTATE		= 0x2000,
	TRACE_ITER_USERSTACKTRACE	= 0x4000,
	TRACE_ITER_SYM_USEROBJ		= 0x8000,
	TRACE_ITER_PRINTK_MSGONLY	= 0x10000,
	TRACE_ITER_CONTEXT_INFO		= 0x20000, /* Print pid/cpu/time */
	TRACE_ITER_LATENCY_FMT		= 0x40000,
	TRACE_ITER_SLEEP_TIME		= 0x80000,
	TRACE_ITER_GRAPH_TIME		= 0x100000,
};
/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
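
/*
 * Illustrative sketch (not part of the original header): output code checks
 * these bits against the global trace_flags, e.g. to decide how a kernel
 * symbol should be printed:
 *
 *	if (trace_flags & TRACE_ITER_SYM_ADDR)
 *		print the raw address as well;
 *	if (!(trace_flags & TRACE_ITER_SYM_MASK))
 *		no symbol output was requested at all;
 */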
extern struct tracer nop_trace;
/**
 * ftrace_preempt_disable - disable preemption scheduler safe
 *
 * When tracing can happen inside the scheduler, there exist
 * cases where the tracing might happen before the need_resched
 * flag is checked. If this happens and the tracer calls
 * preempt_enable (after a disable), a schedule might take place
 * causing an infinite recursion.
 *
 * To prevent this, we read the need_resched flag before
 * disabling preemption. When we want to enable preemption we
 * check the flag, if it is set, then we call preempt_enable_no_resched.
 * Otherwise, we call preempt_enable.
 *
 * The rationale for doing the above is that if need_resched is set
 * and we have yet to reschedule, we are either in an atomic location
 * (where we do not need to check for scheduling) or we are inside
 * the scheduler and do not want to resched.
 */
static inline int ftrace_preempt_disable(void)
{
	int resched;

	resched = need_resched();
	preempt_disable_notrace();

	return resched;
}
/**
 * ftrace_preempt_enable - enable preemption scheduler safe
 * @resched: the return value from ftrace_preempt_disable
 *
 * This is a scheduler safe way to enable preemption and not miss
 * any preemption checks. The matching ftrace_preempt_disable() saved
 * the state of preemption. If resched is set, then we are either
 * inside an atomic or inside the scheduler (we would have already
 * scheduled otherwise). In this case, we do not want to call normal
 * preempt_enable, but preempt_enable_no_resched instead.
 */
static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
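
/*
 * Illustrative sketch (not part of the original header): the pair above is
 * meant to bracket trace-time critical sections, typically like this, with
 * my_trace_hook() being a hypothetical caller:
 *
 *	static void my_trace_hook(void)
 *	{
 *		int resched;
 *
 *		resched = ftrace_preempt_disable();
 *		record the event here;
 *		ftrace_preempt_enable(resched);
 *	}
 *
 * The real users are the various tracer callbacks that may run inside
 * the scheduler.
 */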
#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */
/* set ring buffers to default size if not already done so */
int tracing_update_buffers(void);

/* trace event type bit fields, not numeric */
enum {
	TRACE_EVENT_TYPE_PRINTF		= 1,
	TRACE_EVENT_TYPE_RAW		= 2,
};
struct ftrace_event_field {
	struct list_head	link;
	char			*name;
	char			*type;
	int			filter_type;
	int			offset;
	int			size;
	int			is_signed;
};

struct event_filter {
	int			n_preds;
	struct filter_pred	**preds;
	char			*filter_string;
};

struct event_subsystem {
	struct list_head	list;
	const char		*name;
	struct dentry		*entry;
	struct event_filter	*filter;
	int			nr_events;
};
struct filter_pred;

typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event,
				 int val1, int val2);

struct filter_pred {
	filter_pred_fn_t	fn;
	u64			val;
	char			str_val[MAX_FILTER_STR_VAL];
	int			str_len;
	char			*field_name;
	int			offset;
	int			not;
	int			op;
};
extern void print_event_filter(struct ftrace_event_call *call,
			       struct trace_seq *s);
extern int apply_event_filter(struct ftrace_event_call *call,
			      char *filter_string);
extern int apply_subsystem_event_filter(struct event_subsystem *system,
					char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
					 struct trace_seq *s);
extern int filter_assign_type(const char *type);
static inline int
filter_check_discard(struct ftrace_event_call *call, void *rec,
		     struct ring_buffer *buffer,
		     struct ring_buffer_event *event)
{
	if (unlikely(call->filter_active) && !filter_match_preds(call, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
#define DEFINE_COMPARISON_PRED(type)					\
static int filter_pred_##type(struct filter_pred *pred, void *event,	\
			      int val1, int val2)			\
{									\
	type *addr = (type *)(event + pred->offset);			\
	type val = (type)pred->val;					\
	int match = 0;							\
									\
	switch (pred->op) {						\
	case OP_LT:							\
		match = (*addr < val);					\
		break;							\
	case OP_LE:							\
		match = (*addr <= val);					\
		break;							\
	case OP_GT:							\
		match = (*addr > val);					\
		break;							\
	case OP_GE:							\
		match = (*addr >= val);					\
		break;							\
	default:							\
		break;							\
	}								\
									\
	return match;							\
}
#define DEFINE_EQUALITY_PRED(size)					\
static int filter_pred_##size(struct filter_pred *pred, void *event,	\
			      int val1, int val2)			\
{									\
	u##size *addr = (u##size *)(event + pred->offset);		\
	u##size val = (u##size)pred->val;				\
	int match;							\
									\
	match = (val == *addr) ^ pred->not;				\
									\
	return match;							\
}
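
/*
 * Illustrative sketch (not part of the original header): the event filter
 * code instantiates the two macros above for each size/type it needs, e.g.
 *
 *	DEFINE_COMPARISON_PRED(s64);
 *	DEFINE_COMPARISON_PRED(u32);
 *	DEFINE_EQUALITY_PRED(64);
 *	DEFINE_EQUALITY_PRED(32);
 *
 * yielding filter_pred_s64(), filter_pred_u32(), filter_pred_64(), and so on,
 * each comparing the field at pred->offset inside a raw event record against
 * pred->val.
 */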
extern struct mutex event_mutex;
extern struct list_head ftrace_events;

extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];
#undef TRACE_EVENT_FORMAT
#define TRACE_EVENT_FORMAT(call, proto, args, fmt, tstruct, tpfmt)	\
	extern struct ftrace_event_call event_##call;
#undef TRACE_EVENT_FORMAT_NOFILTER
#define TRACE_EVENT_FORMAT_NOFILTER(call, proto, args, fmt, tstruct, tpfmt)
#include "trace_event_types.h"

#endif /* _LINUX_KERNEL_TRACE_H */