/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/sysctl.h>
#include <linux/hash.h>
#include <linux/ctype.h>
#include <linux/list.h>
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);
static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

/* mcount is defined per arch in assembly */
EXPORT_SYMBOL(mcount);
void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	};
}
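
/*
 * Example: with two ftrace_ops registered, A first and then B, the
 * list reads B -> A -> ftrace_list_end, so one mcount hit fans out as
 *
 *	mcount -> ftrace_list_func(ip, parent_ip)
 *		  -> B.func(ip, parent_ip)
 *		  -> A.func(ip, parent_ip)
 *
 * (A and B are placeholder names for illustration.)
 */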
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag between this call and the
 * moment tracing actually stops.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* Should never be called by interrupts */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}
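
/*
 * The barrier pairing above, spelled out (illustration only): the
 * writer publishes with
 *
 *	ops->next = ftrace_list;
 *	smp_wmb();
 *	ftrace_list = ops;
 *
 * while ftrace_list_func() reads
 *
 *	op = ftrace_list;
 *	read_barrier_depends();
 *	op->func(ip, parent_ip);
 *
 * so a CPU that sees the new list head is guaranteed to also see the
 * ops->next pointer that was set before it was published.
 */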
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* Should never be called by interrupts */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}
#ifdef CONFIG_DYNAMIC_FTRACE

static struct task_struct *ftraced_task;
static DECLARE_WAIT_QUEUE_HEAD(ftraced_waiters);
static unsigned long ftraced_iteration_counter;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};

static int ftrace_filtered;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_filter_lock);
struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
} __attribute__((packed));
#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
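
/*
 * Rough sizing illustration (the real sizes are arch and config
 * dependent, so these numbers are only an example): with 4096-byte
 * pages, a 16-byte struct ftrace_page header and 16-byte dyn_ftrace
 * records, ENTRIES_PER_PAGE works out to (4096 - 16) / 16 = 255.
 */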
/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;
static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;
	int found = 0;

	hlist_for_each_entry(p, t, &ftrace_hash[key], node) {
		if (p->ip == ip) {
			found = 1;
			break;
		}
	}

	return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head(&node->node, &ftrace_hash[key]);
}
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	/* no locking, only called from kstop_machine */

	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}
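
/*
 * The free list is threaded through the records themselves: a freed
 * record no longer needs its ->ip, so that field doubles as the next
 * pointer. Freeing rec1 and then rec2 yields
 *
 *	ftrace_free_records -> rec2 (->ip = rec1) -> rec1 (->ip = 0)
 */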
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			ftrace_disabled = 1;
			ftrace_enabled = 0;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}
static void
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;
	int atomic;

	if (!ftrace_enabled || ftrace_disabled)
		return;

	resched = need_resched();
	preempt_disable_notrace();

	/* We simply need to protect against recursion */
	__get_cpu_var(ftrace_shutdown_disable_cpu)++;
	if (__get_cpu_var(ftrace_shutdown_disable_cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	atomic = irqs_disabled();

	spin_lock_irqsave(&ftrace_shutdown_lock, flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	/*
	 * There's a slight race that the ftraced will update the
	 * hash and reset here. If it is already converted, skip it.
	 */
	if (ftrace_ip_converted(ip))
		goto out_unlock;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;

	ftrace_add_hash(node, key);

	ftraced_trigger = 1;

 out_unlock:
	spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
	__get_cpu_var(ftrace_shutdown_disable_cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
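
/*
 * Keying sketch: hash_long() folds the instruction pointer down to
 * FTRACE_HASHBITS bits, so the key always lands in one of the
 * FTRACE_HASHSIZE buckets of ftrace_hash (the constants themselves
 * are defined in the ftrace headers).
 */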
#define FTRACE_ADDR ((long)(ftrace_caller))
#define MCOUNT_ADDR ((long)(mcount))
static int
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip, fl;
	int failed = 0;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */
		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == 0))
			return 0;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl == FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {
		if (enable)
			new = ftrace_call_replace(ip, FTRACE_ADDR);
		else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	failed = ftrace_modify_code(ip, old, new);
	if (failed) {
		unsigned long key;
		/* It is possible that the function hasn't been converted yet */
		key = hash_long(ip, FTRACE_HASHBITS);
		if (!ftrace_ip_in_hash(ip, key)) {
			rec->flags |= FTRACE_FL_FAILED;
			ftrace_free_rec(rec);
		}
	}

	return 0;
}
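
/*
 * Compact summary of the filter cases spelled out above (same
 * behavior, tabulated):
 *
 *	FILTER	ENABLED		action
 *	  1	   1		nothing
 *	  1	   0		patch in the call to ftrace_caller
 *	  0	   0		nothing
 *	  0	   1		patch the call site back to a nop
 */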
static void ftrace_replace_code(int enable)
{
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int i;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			__ftrace_replace_code(rec, old, new, enable);
		}
	}
}
static void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}
static void
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int failed;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, MCOUNT_ADDR);

	failed = ftrace_modify_code(ip, call, nop);
	if (failed) {
		rec->flags |= FTRACE_FL_FAILED;
		ftrace_free_rec(rec);
	}
}
static int __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}
static void ftrace_run_update_code(int command)
{
	stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
}
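
/*
 * stop_machine_run() executes __ftrace_modify_code() with every other
 * CPU spinning, so no CPU can run code that is in the middle of being
 * patched. A typical invocation combines command bits from the enum
 * above, e.g.
 *
 *	ftrace_run_update_code(FTRACE_ENABLE_CALLS |
 *			       FTRACE_UPDATE_TRACE_FUNC);
 */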
static ftrace_func_t saved_ftrace_func;
static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}
static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (!ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}
static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftraced_suspend is true if we want ftrace running */
	if (ftraced_suspend)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}
static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if ftrace is running */
	if (ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}
static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;
static int __ftrace_update_code(void *ignore)
{
	struct dyn_ftrace *p;
	struct hlist_head head;
	struct hlist_node *t;
	int save_ftrace_enabled;
	cycle_t start, stop;
	int i;

	/* Don't be recording funcs now */
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		if (hlist_empty(&ftrace_hash[i]))
			continue;

		head = ftrace_hash[i];
		INIT_HLIST_HEAD(&ftrace_hash[i]);

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry(p, t, &head, node) {
			ftrace_code_disable(p);
			ftrace_update_cnt++;
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	ftrace_enabled = save_ftrace_enabled;

	return 0;
}
static void ftrace_update_code(void)
{
	if (unlikely(ftrace_disabled))
		return;

	stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
}
static int ftraced(void *ignore)
{
	unsigned long usecs;

	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		/* check once a second */
		schedule_timeout(HZ);

		if (unlikely(ftrace_disabled))
			continue;

		mutex_lock(&ftrace_sysctl_lock);
		mutex_lock(&ftraced_lock);
		if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) {
			ftrace_record_suspend++;
			ftrace_update_code();
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				ftrace_update_tot_cnt = 0;
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				ftrace_disabled = 1;
				WARN_ON_ONCE(1);
			}
			ftraced_trigger = 0;
			ftrace_record_suspend--;
		}
		ftraced_iteration_counter++;
		mutex_unlock(&ftraced_lock);
		mutex_unlock(&ftrace_sysctl_lock);

		wake_up_interruptible(&ftraced_waiters);

		ftrace_shutdown_replenish();

		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
static int __init ftrace_dyn_table_alloc(void)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = NR_TO_INIT / ENTRIES_PER_PAGE;

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FAILED) ||
		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER))) {
			rec = NULL;
			goto retry;
		}
	}

	iter->pos = *pos;

	return rec;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}
static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}
int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}
static void ftrace_filter_reset(void)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned i;

	/* keep kstop machine from running */
	preempt_disable();
	ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~FTRACE_FL_FILTER;
		}
		pg = pg->next;
	}
	preempt_enable();
}
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_filter_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset();

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = FTRACE_ITER_FILTER;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_filter_lock);

	return ret;
}
static ssize_t
ftrace_filter_read(struct file *file, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}
static loff_t
ftrace_filter_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* keep kstop machine from running */
	preempt_disable();
	ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= FTRACE_FL_FILTER;
		}
		pg = pg->next;
	}
	preempt_enable();
}
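
/*
 * Glob examples for the parser above (note that the pattern buffer is
 * modified in place, so callers must pass writable storage):
 *
 *	"sched_switch"	MATCH_FULL		exact name
 *	"sched_*"	MATCH_FRONT_ONLY	prefix match
 *	"*_switch"	MATCH_END_ONLY		suffix match
 *	"*sched*"	MATCH_MIDDLE_ONLY	substring match
 */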
static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_filter_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_filter_lock);

	return ret;
}
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non-zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_filter_lock);
	if (reset)
		ftrace_filter_reset();
	if (buf)
		ftrace_match(buf, len);
	mutex_unlock(&ftrace_filter_lock);
}
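
/*
 * Usage sketch (a hypothetical caller, e.g. a tracer's init code):
 * filter down to the wakeup paths, clearing any earlier filter:
 *
 *	static unsigned char buf[] = "wake_up*";
 *
 *	ftrace_set_filter(buf, sizeof(buf) - 1, 1);
 *
 * The buffer must be writable because ftrace_match() truncates the
 * pattern at the '*' while parsing it.
 */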
static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_filter_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);
	if (iter->filtered && ftraced_suspend && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_filter_lock);

	return 0;
}
static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};
static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_filter_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_filter_lseek,
	.release = ftrace_filter_release,
};
/**
 * ftrace_force_update - force an update to all recording ftrace functions
 *
 * The ftrace dynamic update daemon only wakes up once a second.
 * There may be cases where an update needs to be done immediately
 * for tests or internal kernel tracing to begin. This function
 * wakes the daemon to do an update and will not return until the
 * update is complete.
 */
int ftrace_force_update(void)
{
	unsigned long last_counter;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftraced_lock);
	last_counter = ftraced_iteration_counter;

	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&ftraced_waiters, &wait);

	if (unlikely(!ftraced_task)) {
		ret = -ENODEV;
		goto out;
	}

	do {
		mutex_unlock(&ftraced_lock);
		wake_up_process(ftraced_task);
		schedule();
		mutex_lock(&ftraced_lock);
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		set_current_state(TASK_INTERRUPTIBLE);
	} while (last_counter == ftraced_iteration_counter);

 out:
	mutex_unlock(&ftraced_lock);
	remove_wait_queue(&ftraced_waiters, &wait);
	set_current_state(TASK_RUNNING);

	return ret;
}
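
/*
 * The loop above is a missed-wakeup-safe handshake: the caller
 * snapshots ftraced_iteration_counter and queues itself on
 * ftraced_waiters before kicking the daemon, then sleeps until the
 * daemon has bumped the counter, which it only does after finishing a
 * full update pass.
 */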
static void ftrace_force_shutdown(void)
{
	struct task_struct *task;
	int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

	mutex_lock(&ftraced_lock);
	task = ftraced_task;
	ftraced_task = NULL;
	ftraced_suspend = -1;
	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);

	if (task)
		kthread_stop(task);
}
static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	return 0;
}

fs_initcall(ftrace_init_debugfs);
static int __init ftrace_dynamic_init(void)
{
	struct task_struct *p;
	unsigned long addr;
	int ret;

	addr = (unsigned long)ftrace_record_ip;

	stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr) {
		ret = (int)addr;
		goto failed;
	}

	ret = ftrace_dyn_table_alloc();
	if (ret)
		goto failed;

	p = kthread_run(ftraced, NULL, "ftraced");
	if (IS_ERR(p)) {
		ret = -1;
		goto failed;
	}

	last_ftrace_enabled = ftrace_enabled = 1;

	ftraced_task = p;

	return 0;

 failed:
	ftrace_disabled = 1;
	return ret;
}

core_initcall(ftrace_dynamic_init);
#else
# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
# define ftrace_force_shutdown()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
/**
 * ftrace_kill - totally shutdown ftrace
 *
 * This is a safety measure. If something was detected that seems
 * wrong, calling this function will keep ftrace from making any
 * more code modifications or updates. It is used when something
 * has gone wrong elsewhere.
 */
void ftrace_kill(void)
{
	mutex_lock(&ftrace_sysctl_lock);
	ftrace_disabled = 1;
	ftrace_enabled = 0;

	clear_ftrace_function();
	mutex_unlock(&ftrace_sysctl_lock);

	/* Try to totally disable ftrace */
	ftrace_force_shutdown();
}
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
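
/*
 * Minimal usage sketch; the "my_" names are placeholders, not part of
 * this file. As the comment above warns, the callback and everything
 * it calls must be notrace:
 *
 *	static void notrace my_callback(unsigned long ip,
 *					unsigned long parent_ip)
 *	{
 *		... record ip and parent_ip somewhere safe ...
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly =
 *	{
 *		.func = my_callback,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */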
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}