/*
 * Ftrace header.  For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.txt
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_clock.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/ftrace.h>

/*
 * If the arch supports passing the variable contents of
 * function_trace_op as the third parameter back from the
 * mcount call, then the arch should define this as 1.
 */
#ifndef ARCH_SUPPORTS_FTRACE_OPS
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif

/*
 * If the arch's mcount caller does not support all of ftrace's
 * features, then it must call an indirect function that
 * does. Or at least does enough to prevent any unwelcome side effects.
 */
#if !defined(CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST) || \
        !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
#else
# define FTRACE_FORCE_LIST_FUNC 0
#endif


struct module;
struct ftrace_hash;

#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
                     void __user *buffer, size_t *lenp,
                     loff_t *ppos);

struct ftrace_ops;

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
                              struct ftrace_ops *op, struct pt_regs *regs);

/*
 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
 * set in the flags member.
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * GLOBAL  - set manually by the ftrace_ops user to denote that the
 *           ftrace_ops is part of the global tracers sharing the same
 *           filter via the set_ftrace_* debugfs files.
 * DYNAMIC - set when ftrace_ops is registered to denote a dynamically
 *           allocated ftrace_ops which needs special care
 * CONTROL - set manually by the ftrace_ops user to denote that the
 *           ftrace_ops can be controlled by the following calls:
 *             ftrace_function_local_enable
 *             ftrace_function_local_disable
 * SAVE_REGS - The ftrace_ops wants regs saved at each function called
 *           and passed to the callback. If this flag is set, but the
 *           architecture does not support passing regs
 *           (ARCH_SUPPORTS_FTRACE_SAVE_REGS is not defined), then the
 *           ftrace_ops will fail to register, unless the next flag
 *           is set.
 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
 *           handler can handle an arch that does not save regs
 *           (the handler tests if regs == NULL), then it can set
 *           this flag instead. It will not fail registering the ftrace_ops,
 *           but the regs field will be NULL if the arch does not support
 *           passing regs to the handler.
 *           Note, if this flag is set, the SAVE_REGS flag will automatically
 *           get set upon registering the ftrace_ops, if the arch supports it.
 * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace
 *           infrastructure that the callback has its own recursion
 *           protection. If it does not set this, then the ftrace
 *           infrastructure will add recursion protection for the caller.
 */
enum {
        FTRACE_OPS_FL_ENABLED                   = 1 << 0,
        FTRACE_OPS_FL_GLOBAL                    = 1 << 1,
        FTRACE_OPS_FL_DYNAMIC                   = 1 << 2,
        FTRACE_OPS_FL_CONTROL                   = 1 << 3,
        FTRACE_OPS_FL_SAVE_REGS                 = 1 << 4,
        FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED    = 1 << 5,
        FTRACE_OPS_FL_RECURSION_SAFE            = 1 << 6,
};

struct ftrace_ops {
        ftrace_func_t           func;
        struct ftrace_ops       *next;
        unsigned long           flags;
        int __percpu            *disabled;
#ifdef CONFIG_DYNAMIC_FTRACE
        struct ftrace_hash      *notrace_hash;
        struct ftrace_hash      *filter_hash;
#endif
};
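
/*
 * Example usage (an illustrative sketch only, not part of this header's
 * API; sample_func and sample_ops are hypothetical names):
 *
 *      static void notrace sample_func(unsigned long ip,
 *                                      unsigned long parent_ip,
 *                                      struct ftrace_ops *op,
 *                                      struct pt_regs *regs)
 *      {
 *              // called for every traced function
 *      }
 *
 *      static struct ftrace_ops sample_ops __read_mostly = {
 *              .func   = sample_func,
 *              .flags  = FTRACE_OPS_FL_RECURSION_SAFE,
 *      };
 *
 * The ops is then passed to register_ftrace_function() declared below,
 * and torn down with unregister_ftrace_function().
 */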

extern int function_trace_stop;

/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
        FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
        FTRACE_TYPE_RETURN,    /* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/**
 * ftrace_stop - stop function tracer.
 *
 * A quick way to stop the function tracer. Note that this is an on/off
 * switch; it is not recursive like preempt_disable.
 * This does not disable the calling of mcount, it only stops the
 * calling of functions from mcount.
 */
static inline void ftrace_stop(void)
{
        function_trace_stop = 1;
}

/**
 * ftrace_start - start the function tracer.
 *
 * This function is the inverse of ftrace_stop. This does not enable
 * the function tracing if the function tracer is disabled. This only
 * sets the function tracer flag to continue calling the functions
 * from mcount.
 */
static inline void ftrace_start(void)
{
        function_trace_stop = 0;
}
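
/*
 * Illustrative sketch of pairing the two (the caller must provide its
 * own serialization; this is a plain on/off flag, not a recursive
 * counter; do_sensitive_work is hypothetical):
 *
 *      ftrace_stop();
 *      do_sensitive_work();
 *      ftrace_start();
 */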

/*
 * The ftrace_ops must be static and should also be read_mostly.
 * These functions do modify read_mostly variables, so use them
 * sparingly. Never free an ftrace_ops or modify the next pointer after
 * it has been registered. Even after unregistering it, the next
 * pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
void clear_ftrace_function(void);

/**
 * ftrace_function_local_enable - enable controlled ftrace_ops on current cpu
 *
 * This function enables tracing on the current cpu by decreasing
 * the per cpu control variable.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
{
        if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
                return;

        (*this_cpu_ptr(ops->disabled))--;
}

/**
 * ftrace_function_local_disable - disable controlled ftrace_ops on current cpu
 *
 * This function disables tracing on the current cpu by increasing
 * the per cpu control variable.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
{
        if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
                return;

        (*this_cpu_ptr(ops->disabled))++;
}

/**
 * ftrace_function_local_disabled - returns ftrace_ops disabled value
 *                                  on current cpu
 *
 * This function returns the value of ftrace_ops::disabled on the
 * current cpu.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
{
        WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL));
        return *this_cpu_ptr(ops->disabled);
}
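
/*
 * Illustrative sketch (ctl_ops is a hypothetical ftrace_ops registered
 * with FTRACE_OPS_FL_CONTROL set):
 *
 *      preempt_disable();
 *      ftrace_function_local_disable(&ctl_ops);
 *      // ctl_ops.func is now suppressed on this CPU
 *      ftrace_function_local_enable(&ctl_ops);
 *      preempt_enable();
 */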

extern void ftrace_stub(unsigned long a0, unsigned long a1,
                        struct ftrace_ops *op, struct pt_regs *regs);

#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline int ftrace_nr_registered_ops(void)
{
        return 0;
}
static inline void clear_ftrace_function(void) { }
static inline void ftrace_kill(void) { }
static inline void ftrace_stop(void) { }
static inline void ftrace_start(void) { }
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_STACK_TRACER
extern int stack_tracer_enabled;
int
stack_trace_sysctl(struct ctl_table *table, int write,
                   void __user *buffer, size_t *lenp,
                   loff_t *ppos);
#endif

struct ftrace_func_command {
        struct list_head        list;
        char                    *name;
        int                     (*func)(struct ftrace_hash *hash,
                                        char *func, char *cmd,
                                        char *params, int enable);
};

#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);

void ftrace_bug(int err, unsigned long ip);

struct seq_file;

struct ftrace_probe_ops {
        void                    (*func)(unsigned long ip,
                                        unsigned long parent_ip,
                                        void **data);
        int                     (*callback)(unsigned long ip, void **data);
        void                    (*free)(void **data);
        int                     (*print)(struct seq_file *m,
                                         unsigned long ip,
                                         struct ftrace_probe_ops *ops,
                                         void *data);
};

extern int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                               void *data);
extern void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                                 void *data);
extern void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
extern void unregister_ftrace_function_probe_all(char *glob);
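
/*
 * Illustrative sketch: run a handler whenever a function matching a glob
 * is hit (sample_probe and sample_probe_ops are hypothetical):
 *
 *      static void sample_probe(unsigned long ip, unsigned long parent_ip,
 *                               void **data)
 *      {
 *              // ip identifies the traced function
 *      }
 *
 *      static struct ftrace_probe_ops sample_probe_ops = {
 *              .func = sample_probe,
 *      };
 *
 *      register_ftrace_function_probe("vfs_*", &sample_probe_ops, NULL);
 *      ...
 *      unregister_ftrace_function_probe("vfs_*", &sample_probe_ops, NULL);
 */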

extern int ftrace_text_reserved(void *start, void *end);

extern int ftrace_nr_registered_ops(void);

/*
 * The dyn_ftrace record's flags field is split into two parts.
 * The first part, which holds values 0 through FTRACE_REF_MAX, is a
 * counter of the number of callbacks that have registered the function
 * that the dyn_ftrace descriptor represents.
 *
 * The second part is a mask:
 *  ENABLED - the function is being traced
 *  REGS    - the record wants the function to save regs
 *  REGS_EN - the function is set up to save regs.
 *
 * When a new ftrace_ops is registered and wants a function to save
 * pt_regs, the rec->flags REGS bit is set. When the function has been
 * set up to save regs, the REGS_EN flag is set. Once a function
 * starts saving regs it will do so until all ftrace_ops are removed
 * from tracing that function.
 */
enum {
        FTRACE_FL_ENABLED       = (1UL << 29),
        FTRACE_FL_REGS          = (1UL << 30),
        FTRACE_FL_REGS_EN       = (1UL << 31)
};

#define FTRACE_FL_MASK          (0x7UL << 29)
#define FTRACE_REF_MAX          ((1UL << 29) - 1)

struct dyn_ftrace {
        union {
                unsigned long           ip; /* address of mcount call-site */
                struct dyn_ftrace       *freelist;
        };
        unsigned long           flags;
        struct dyn_arch_ftrace  arch;
};
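
/*
 * Illustrative sketch of reading the two parts of rec->flags (rec being
 * a struct dyn_ftrace pointer, e.g. one obtained from the record
 * iterator declared further down):
 *
 *      unsigned long refs = rec->flags & FTRACE_REF_MAX;  // callback count
 *      bool enabled = rec->flags & FTRACE_FL_ENABLED;     // being traced?
 *      bool wants_regs = rec->flags & FTRACE_FL_REGS;     // regs requested?
 */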

int ftrace_force_update(void);
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
                         int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
                      int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
                       int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
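
/*
 * Illustrative sketch: restrict a hypothetical sample_ops to scheduler
 * functions, exclude one of them, and release the hashes on teardown:
 *
 *      ftrace_set_filter(&sample_ops, (unsigned char *)"sched_*",
 *                        strlen("sched_*"), 1);
 *      ftrace_set_notrace(&sample_ops, (unsigned char *)"sched_clock",
 *                         strlen("sched_clock"), 1);
 *      ...
 *      ftrace_free_filter(&sample_ops);
 */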

int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);

enum {
        FTRACE_UPDATE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_START_FUNC_RET           = (1 << 3),
        FTRACE_STOP_FUNC_RET            = (1 << 4),
};

/*
 * The FTRACE_UPDATE_* enum is used to pass information back
 * from the ftrace_update_record() and ftrace_test_record()
 * functions. These are called by the code update routines
 * to find out what is to be done for a given function.
 *
 * IGNORE           - The function is already what we want it to be
 * MAKE_CALL        - Start tracing the function
 * MODIFY_CALL      - Stop saving regs for the function
 * MODIFY_CALL_REGS - Start saving regs for the function
 * MAKE_NOP         - Stop tracing the function
 */
enum {
        FTRACE_UPDATE_IGNORE,
        FTRACE_UPDATE_MAKE_CALL,
        FTRACE_UPDATE_MODIFY_CALL,
        FTRACE_UPDATE_MODIFY_CALL_REGS,
        FTRACE_UPDATE_MAKE_NOP,
};

enum {
        FTRACE_ITER_FILTER      = (1 << 0),
        FTRACE_ITER_NOTRACE     = (1 << 1),
        FTRACE_ITER_PRINTALL    = (1 << 2),
        FTRACE_ITER_DO_HASH     = (1 << 3),
        FTRACE_ITER_HASH        = (1 << 4),
        FTRACE_ITER_ENABLED     = (1 << 5),
};

void arch_ftrace_update_code(int command);

struct ftrace_rec_iter;

struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);

#define for_ftrace_rec_iter(iter)               \
        for (iter = ftrace_rec_iter_start();    \
             iter;                              \
             iter = ftrace_rec_iter_next(iter))
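
/*
 * Illustrative sketch (as used by arch code-update routines):
 *
 *      struct ftrace_rec_iter *iter;
 *      struct dyn_ftrace *rec;
 *
 *      for_ftrace_rec_iter(iter) {
 *              rec = ftrace_rec_iter_record(iter);
 *              // inspect rec->ip and rec->flags here
 *      }
 */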


int ftrace_update_record(struct dyn_ftrace *rec, int enable);
int ftrace_test_record(struct dyn_ftrace *rec, int enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);

extern ftrace_func_t ftrace_trace_function;

int ftrace_regex_open(struct ftrace_ops *ops, int flag,
                      struct inode *inode, struct file *file);
ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
                            size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
                             size_t cnt, loff_t *ppos);
loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int origin);
int ftrace_regex_release(struct inode *inode, struct file *file);

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void *data);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);

void ftrace_modify_all_code(int command);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif

#ifndef FTRACE_REGS_ADDR
#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
#else
# define FTRACE_REGS_ADDR FTRACE_ADDR
#endif
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif

/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the mcount call site record
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
                           struct dyn_ftrace *rec, unsigned long addr);

/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the mcount call site record
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
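
/*
 * Illustrative sketch of the read/compare/write pattern that the
 * kernel-doc above demands of arch implementations. build_expected_insn
 * and arch_text_poke are hypothetical helpers; real archs use
 * probe_kernel_read() plus their own text-modification primitives, and
 * new_insn stands for the replacement bytes (nop or call):
 *
 *      unsigned char cur[MCOUNT_INSN_SIZE], expect[MCOUNT_INSN_SIZE];
 *
 *      if (probe_kernel_read(cur, (void *)rec->ip, MCOUNT_INSN_SIZE))
 *              return -EFAULT;         // could not read the site
 *      build_expected_insn(expect, rec->ip, addr);
 *      if (memcmp(cur, expect, MCOUNT_INSN_SIZE))
 *              return -EINVAL;         // contents are not what we expect
 *      if (arch_text_poke((void *)rec->ip, new_insn, MCOUNT_INSN_SIZE))
 *              return -EPERM;          // write failed
 *      return 0;
 */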

#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS
/**
 * ftrace_modify_call - convert from one addr to another (no nop)
 * @rec: the mcount call site record
 * @old_addr: the address expected to be currently called to
 * @addr: the address to change to
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @old_addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                              unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                                     unsigned long addr)
{
        return -EINVAL;
}
#endif

/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else
static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_release_mod(struct module *mod) {}
static inline int register_ftrace_command(struct ftrace_func_command *cmd)
{
        return -EINVAL;
}
static inline int unregister_ftrace_command(char *cmd_name)
{
        return -EINVAL;
}
static inline int ftrace_text_reserved(void *start, void *end)
{
        return 0;
}

/*
 * Again, users of functions that take an ftrace_ops may not have the
 * functions defined when ftrace is not enabled, but these functions
 * may still be called. Use macros instead of inlines so that the ops
 * argument is never evaluated.
 */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)

static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
                                          size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
                                           size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
        return -ENODEV;
}
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
#endif /* CONFIG_DYNAMIC_FTRACE */

/* totally disable ftrace - cannot be re-enabled after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
        ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
        int saved_ftrace_enabled = ftrace_enabled;
        ftrace_enabled = 0;
        return saved_ftrace_enabled;
#else
        return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
        ftrace_enabled = enabled;
#endif
}
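
/*
 * Illustrative sketch (the caller supplies the synchronization mentioned
 * above):
 *
 *      int saved = __ftrace_enabled_save();
 *      // region that must run with ftrace_enabled forced off
 *      __ftrace_enabled_restore(saved);
 */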

#ifndef HAVE_ARCH_CALLER_ADDR
# ifdef CONFIG_FRAME_POINTER
#  define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
#  define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
#  define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
#  define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
#  define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
#  define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
#  define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
# else
#  define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
#  define CALLER_ADDR1 0UL
#  define CALLER_ADDR2 0UL
#  define CALLER_ADDR3 0UL
#  define CALLER_ADDR4 0UL
#  define CALLER_ADDR5 0UL
#  define CALLER_ADDR6 0UL
# endif
#endif /* ifndef HAVE_ARCH_CALLER_ADDR */

#ifdef CONFIG_IRQSOFF_TRACER
extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
#else
static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif

#ifdef CONFIG_PREEMPT_TRACER
extern void trace_preempt_on(unsigned long a0, unsigned long a1);
extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
/*
 * Use defines instead of static inlines because some arches will still
 * generate code for the CALLER_ADDR arguments, when we really want
 * these to be real nops.
 */
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#else
static inline void ftrace_init(void) { }
#endif

/*
 * Structure that defines an entry function trace.
 */
struct ftrace_graph_ent {
        unsigned long func; /* Current function */
        int depth;
};

/*
 * Structure that defines a return function trace.
 */
struct ftrace_graph_ret {
        unsigned long func; /* Current function */
        unsigned long long calltime;
        unsigned long long rettime;
        /* Number of functions that overran the depth limit for current task */
        unsigned long overrun;
        int depth;
};

/* Types of the callback handlers for tracing function graphs */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
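
/*
 * Illustrative sketch of a matching handler pair for the
 * register_ftrace_graph() call declared below (sample_entry and
 * sample_return are hypothetical):
 *
 *      static int sample_entry(struct ftrace_graph_ent *ent)
 *      {
 *              return 1;       // nonzero: do trace this function
 *      }
 *
 *      static void sample_return(struct ftrace_graph_ret *ret)
 *      {
 *              // ret->rettime - ret->calltime is the function's duration
 *      }
 *
 *      register_ftrace_graph(sample_return, sample_entry);
 */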

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* for init task */
#define INIT_FTRACE_GRAPH              .ret_stack = NULL,

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
        unsigned long ret;
        unsigned long func;
        unsigned long long calltime;
        unsigned long long subtime;
        unsigned long fp;
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
                         unsigned long frame_pointer);

/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer but we still want it traced by the usual function
 * tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph            notrace

/*
 * We want to know which function is an entrypoint of a hardirq
 * so we can mark it in the output.
 */
#define __irq_entry __attribute__((__section__(".irqentry.text")))

/* Limits of hardirq entrypoints */
extern char __irqentry_text_start[];
extern char __irqentry_text_end[];

#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32
extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
                                 trace_func_graph_ent_t entryfunc);

extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void unregister_ftrace_graph(void);

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);

static inline int task_curr_ret_stack(struct task_struct *t)
{
        return t->curr_ret_stack;
}

static inline void pause_graph_tracing(void)
{
        atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
        atomic_dec(&current->tracing_graph_pause);
}
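
/*
 * Illustrative sketch: tracing_graph_pause is an atomic counter, so
 * these pairs may nest:
 *
 *      pause_graph_tracing();
 *      // work that should not show up in the graph trace
 *      unpause_graph_tracing();
 */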
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */

#define __notrace_funcgraph
#define __irq_entry
#define INIT_FTRACE_GRAPH

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }

static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
                                        trace_func_graph_ent_t entryfunc)
{
        return -1;
}
static inline void unregister_ftrace_graph(void) { }

static inline int task_curr_ret_stack(struct task_struct *tsk)
{
        return -1;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_TRACING

/* flags for current->trace */
enum {
        TSK_TRACE_FL_TRACE_BIT  = 0,
        TSK_TRACE_FL_GRAPH_BIT  = 1,
};
enum {
        TSK_TRACE_FL_TRACE      = 1 << TSK_TRACE_FL_TRACE_BIT,
        TSK_TRACE_FL_GRAPH      = 1 << TSK_TRACE_FL_GRAPH_BIT,
};

static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
        set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
        clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
        return tsk->trace & TSK_TRACE_FL_TRACE;
}

static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
        set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
        clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
        return tsk->trace & TSK_TRACE_FL_GRAPH;
}

enum ftrace_dump_mode;

extern enum ftrace_dump_mode ftrace_dump_on_oops;

#ifdef CONFIG_PREEMPT
#define INIT_TRACE_RECURSION           .trace_recursion = 0,
#endif

#endif /* CONFIG_TRACING */

#ifndef INIT_TRACE_RECURSION
#define INIT_TRACE_RECURSION
#endif

#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */