#ifndef _LINUX_FTRACE_EVENT_H
#define _LINUX_FTRACE_EVENT_H

#include <linux/ring_buffer.h>
#include <linux/trace_seq.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>

struct trace_array;
struct tracer;
struct dentry;

struct trace_print_flags {
	unsigned long		mask;
	const char		*name;
};

struct trace_print_flags_u64 {
	unsigned long long	mask;
	const char		*name;
};

const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
				   unsigned long flags,
				   const struct trace_print_flags *flag_array);

const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
				     const struct trace_print_flags *symbol_array);

#if BITS_PER_LONG == 32
const char *ftrace_print_symbols_seq_u64(struct trace_seq *p,
					 unsigned long long val,
					 const struct trace_print_flags_u64
					 *symbol_array);
#endif

const char *ftrace_print_hex_seq(struct trace_seq *p,
				 const unsigned char *buf, int len);
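
/*
 * Illustrative use of the helpers above (a sketch, not part of this API):
 * ftrace_print_flags_seq() renders the bits of @flags that appear in a
 * NULL-name-terminated flag table into @p and returns the resulting string,
 * e.g. with a hypothetical table:
 *
 *	static const struct trace_print_flags my_flags[] = {
 *		{ 0x01, "FLAG_A" }, { 0x02, "FLAG_B" }, { 0, NULL }
 *	};
 *	...
 *	ftrace_print_flags_seq(p, "|", flags, my_flags);
 *
 * The __print_flags()/__print_symbolic()/__print_hex() helpers used in
 * TRACE_EVENT() print formats are built on top of these functions.
 */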

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned short		type;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
	int			padding;
};

#define FTRACE_MAX_EVENT						\
	((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
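
/*
 * Since trace_entry::type is an unsigned short, FTRACE_MAX_EVENT works out
 * to (1 << 16) - 1 == 65535, the largest event type id that fits in it.
 */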

/*
 * Trace iterator - used by the printout routines that present trace
 * results to the user; these routines may sleep, etc.:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	void			*private;
	int			cpu_file;
	struct mutex		mutex;
	struct ring_buffer_iter	**buffer_iter;
	unsigned long		iter_flags;

	/* trace_seq for __print_flags() and __print_symbolic() etc. */
	struct trace_seq	tmp_seq;

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	unsigned long		lost_events;
	int			leftover;
	int			ent_size;
	int			cpu;
	u64			ts;

	loff_t			pos;
	long			idx;

	cpumask_var_t		started;
};

enum trace_iter_flags {
	TRACE_FILE_LAT_FMT	= 1,
	TRACE_FILE_ANNOTATE	= 2,
	TRACE_FILE_TIME_IN_NS	= 4,
};

struct trace_event;

typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
					      int flags, struct trace_event *event);

struct trace_event_functions {
	trace_print_func	trace;
	trace_print_func	raw;
	trace_print_func	hex;
	trace_print_func	binary;
};

struct trace_event {
	struct hlist_node		node;
	struct list_head		list;
	int				type;
	struct trace_event_functions	*funcs;
};

extern int register_ftrace_event(struct trace_event *event);
extern int unregister_ftrace_event(struct trace_event *event);

/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
};

void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);
struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
				  int type, unsigned long len,
				  unsigned long flags, int pc);
void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc);
void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc);
void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs);
void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event);

void tracing_record_cmdline(struct task_struct *tsk);

struct event_filter;

enum trace_reg {
	TRACE_REG_REGISTER,
	TRACE_REG_UNREGISTER,
#ifdef CONFIG_PERF_EVENTS
	TRACE_REG_PERF_REGISTER,
	TRACE_REG_PERF_UNREGISTER,
	TRACE_REG_PERF_OPEN,
	TRACE_REG_PERF_CLOSE,
	TRACE_REG_PERF_ADD,
	TRACE_REG_PERF_DEL,
#endif
};

struct ftrace_event_call;

struct ftrace_event_class {
	char			*system;
	void			*probe;
#ifdef CONFIG_PERF_EVENTS
	void			*perf_probe;
#endif
	int			(*reg)(struct ftrace_event_call *event,
				       enum trace_reg type, void *data);
	int			(*define_fields)(struct ftrace_event_call *);
	struct list_head	*(*get_fields)(struct ftrace_event_call *);
	struct list_head	fields;
	int			(*raw_init)(struct ftrace_event_call *);
};

extern int ftrace_event_reg(struct ftrace_event_call *event,
			    enum trace_reg type, void *data);
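
/*
 * The class ->reg() callback is invoked with one of the trace_reg values
 * above to attach or detach the trace (and, with CONFIG_PERF_EVENTS, perf)
 * probes. ftrace_event_reg() is the stock implementation used by the
 * TRACE_EVENT() machinery; a hand-rolled event class may supply its own.
 */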

enum {
	TRACE_EVENT_FL_ENABLED_BIT,
	TRACE_EVENT_FL_FILTERED_BIT,
	TRACE_EVENT_FL_RECORDED_CMD_BIT,
	TRACE_EVENT_FL_CAP_ANY_BIT,
	TRACE_EVENT_FL_NO_SET_FILTER_BIT,
	TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
};

enum {
	TRACE_EVENT_FL_ENABLED		= (1 << TRACE_EVENT_FL_ENABLED_BIT),
	TRACE_EVENT_FL_FILTERED		= (1 << TRACE_EVENT_FL_FILTERED_BIT),
	TRACE_EVENT_FL_RECORDED_CMD	= (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT),
	TRACE_EVENT_FL_CAP_ANY		= (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
	TRACE_EVENT_FL_NO_SET_FILTER	= (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
	TRACE_EVENT_FL_IGNORE_ENABLE	= (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
};

struct ftrace_event_call {
	struct list_head	list;
	struct ftrace_event_class *class;
	char			*name;
	struct dentry		*dir;
	struct trace_event	event;
	const char		*print_fmt;
	struct event_filter	*filter;
	void			*mod;
	void			*data;

	/*
	 * 32 bit flags:
	 *   bit 1:	enabled
	 *   bit 2:	filter_active
	 *   bit 3:	enabled cmd record
	 *   bit 4:	allow trace by non root (cap any)
	 *   bit 5:	failed to apply filter
	 *   bit 6:	ftrace internal event (do not enable)
	 *
	 * Changes to flags must be made while holding the event_mutex.
	 *
	 * Note: Reads of flags do not hold the event_mutex since
	 * they occur in critical sections. But the way flags
	 * is currently used, these changes do not affect the code
	 * except that when a change is made, it may take a short
	 * time to propagate to other CPUs due to caching and such.
	 */
	unsigned int		flags;

#ifdef CONFIG_PERF_EVENTS
	int			perf_refcount;
	struct hlist_head __percpu *perf_events;
#endif
};
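
/*
 * Illustrative flag manipulation (a sketch; the real users live in the
 * trace event core, e.g. kernel/trace/trace_events.c). Writers update
 * ->flags under event_mutex, readers may test the bits locklessly as
 * described in the comment above:
 *
 *	mutex_lock(&event_mutex);
 *	call->flags |= TRACE_EVENT_FL_ENABLED;
 *	mutex_unlock(&event_mutex);
 *	...
 *	if (call->flags & TRACE_EVENT_FL_FILTERED)
 *		... the record is subject to filtering ...
 */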

#define __TRACE_EVENT_FLAGS(name, value)				\
	static int __init trace_init_flags_##name(void)		\
	{								\
		event_##name.flags = value;				\
		return 0;						\
	}								\
	early_initcall(trace_init_flags_##name);
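
/*
 * For example (a sketch of how this is typically reached through the
 * TRACE_EVENT_FLAGS() wrapper used by event headers):
 *
 *	__TRACE_EVENT_FLAGS(sys_enter, TRACE_EVENT_FL_CAP_ANY)
 *
 * registers an early initcall that sets event_sys_enter.flags to
 * TRACE_EVENT_FL_CAP_ANY, i.e. the event may be traced by non-root users.
 */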

#define PERF_MAX_TRACE_SIZE	2048

#define MAX_FILTER_STR_VAL	256	/* Should handle KSYM_SYMBOL_LEN */

extern void destroy_preds(struct ftrace_event_call *call);
extern int filter_match_preds(struct event_filter *filter, void *rec);
extern int filter_current_check_discard(struct ring_buffer *buffer,
					struct ftrace_event_call *call,
					void *rec,
					struct ring_buffer_event *event);

enum {
	FILTER_OTHER = 0,
	FILTER_STATIC_STRING,
	FILTER_DYN_STRING,
	FILTER_PTR_STRING,
	FILTER_TRACE_FN,
};

#define EVENT_STORAGE_SIZE 128
extern struct mutex event_storage_mutex;
extern char event_storage[EVENT_STORAGE_SIZE];

extern int trace_event_raw_init(struct ftrace_event_call *call);
extern int trace_define_field(struct ftrace_event_call *call, const char *type,
			      const char *name, int offset, int size,
			      int is_signed, int filter_type);
extern int trace_add_event_call(struct ftrace_event_call *call);
extern void trace_remove_event_call(struct ftrace_event_call *call);

#define is_signed_type(type)	(((type)(-1)) < (type)0)
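
/*
 * For instance, is_signed_type(int) is true because (int)-1 < 0, while
 * is_signed_type(unsigned long) is false because (unsigned long)-1 wraps
 * around to ULONG_MAX.
 */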

int trace_set_clr_event(const char *system, const char *event, int set);

/*
 * The double __builtin_constant_p is needed because gcc will give us an
 * error if we try to initialize the static variable with fmt when it is
 * not a constant, even though the outer if statement would optimize that
 * branch away.
 */
#define event_trace_printk(ip, fmt, args...)				\
do {									\
	__trace_printk_check_format(fmt, ##args);			\
	tracing_record_cmdline(current);				\
	if (__builtin_constant_p(fmt)) {				\
		static const char *trace_printk_fmt			\
		  __attribute__((section("__trace_printk_fmt"))) =	\
			__builtin_constant_p(fmt) ? fmt : NULL;		\
									\
		__trace_bprintk(ip, trace_printk_fmt, ##args);		\
	} else								\
		__trace_printk(ip, fmt, ##args);			\
} while (0)
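
/*
 * A hypothetical call site (illustration only):
 *
 *	event_trace_printk(_THIS_IP_, "processed %d entries\n", count);
 *
 * With a constant format string this records a lightweight bprintk entry;
 * otherwise it falls back to the regular __trace_printk() path.
 */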

#ifdef CONFIG_PERF_EVENTS
struct perf_event;

DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);

extern int perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);
extern int perf_trace_add(struct perf_event *event, int flags);
extern void perf_trace_del(struct perf_event *event, int flags);
extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
				     char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
extern void *perf_trace_buf_prepare(int size, unsigned short type,
				    struct pt_regs *regs, int *rctxp);

static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
		      u64 count, struct pt_regs *regs, void *head,
		      struct task_struct *task)
{
	perf_tp_event(addr, count, raw_data, size, regs, head, rctx, task);
}
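
/*
 * Typical pairing (a sketch of what the generated perf probe code does):
 *
 *	entry = perf_trace_buf_prepare(size, event_call->event.type,
 *				       regs, &rctx);
 *	if (entry) {
 *		... fill in the event fields ...
 *		perf_trace_buf_submit(entry, size, rctx, addr, count,
 *				      regs, head, task);
 *	}
 */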
#endif

#endif /* _LINUX_FTRACE_EVENT_H */