/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/sysctl.h>
#include <linux/hash.h>
#include <linux/ctype.h>
#include <linux/list.h>
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
/* mcount is defined per arch in assembly */
EXPORT_SYMBOL(mcount);
notrace void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	};
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag before every CPU stops
 * calling the old function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}
static int notrace __register_ftrace_function(struct ftrace_ops *ops)
{
	/* Should never be called by interrupts */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}
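/*
 * The write barrier above pairs with the read_barrier_depends()
 * calls in ftrace_list_func(): a writer publishes a node with
 *
 *	node->next = head;
 *	smp_wmb();
 *	head = node;
 *
 * so a dependent reader can never observe the new head while
 * node->next is still uninitialized.  ('node' and 'head' are
 * illustrative names here, not symbols in this file.)
 */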
static int notrace __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* Should never be called by interrupts */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}
#ifdef CONFIG_DYNAMIC_FTRACE

static struct task_struct *ftraced_task;
static DECLARE_WAIT_QUEUE_HEAD(ftraced_waiters);
static unsigned long ftraced_iteration_counter;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};
static int ftrace_filtered;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_filter_lock);
struct ftrace_page {
	struct ftrace_page	*next;
	int			index;
	struct dyn_ftrace	records[];
} __attribute__((packed));
#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
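/*
 * Back-of-the-envelope sizing (illustrative numbers; both structure
 * sizes are architecture dependent): with PAGE_SIZE == 4096, a
 * 16-byte struct ftrace_page header and 16-byte dyn_ftrace records,
 * ENTRIES_PER_PAGE is (4096 - 16) / 16 == 255 records per page, so
 * NR_TO_INIT below (10000) needs about 40 pages.
 */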
/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;

static int ftrace_record_suspend;
static inline int notrace
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;
	int found = 0;

	hlist_for_each_entry(p, t, &ftrace_hash[key], node) {
		if (p->ip == ip) {
			found = 1;
			break;
		}
	}

	return found;
}
static inline void notrace
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head(&node->node, &ftrace_hash[key]);
}
static notrace struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}
static void notrace
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;
	int atomic;

	resched = need_resched();
	preempt_disable_notrace();

	/* We simply need to protect against recursion */
	__get_cpu_var(ftrace_shutdown_disable_cpu)++;
	if (__get_cpu_var(ftrace_shutdown_disable_cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	atomic = irqs_disabled();

	spin_lock_irqsave(&ftrace_shutdown_lock, flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	/*
	 * There's a slight race that the ftraced will update the
	 * hash and reset here. If it is already converted, skip it.
	 */
	if (ftrace_ip_converted(ip))
		goto out_unlock;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;

	ftrace_add_hash(node, key);

 out_unlock:
	spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
	__get_cpu_var(ftrace_shutdown_disable_cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
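/*
 * The guard in ftrace_record_ip() is worth spelling out: mcount can
 * fire from almost any context, including from functions the recorder
 * itself calls, so recursion is fenced off with a per-cpu depth
 * counter rather than a lock.  The idiom, reduced to a sketch
 * ('depth' and 'do_real_work' are illustrative names only):
 *
 *	preempt_disable_notrace();
 *	if (++__get_cpu_var(depth) == 1)
 *		do_real_work();
 *	__get_cpu_var(depth)--;
 *	preempt_enable_notrace();
 */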
#define FTRACE_ADDR ((long)(&ftrace_caller))
#define MCOUNT_ADDR ((long)(&mcount))
static int notrace
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip;
	unsigned long fl;
	int failed;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */
		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == 0))
			return 0;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl == FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {
		if (enable)
			new = ftrace_call_replace(ip, FTRACE_ADDR);
		else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	failed = ftrace_modify_code(ip, old, new);
	if (failed)
		rec->flags |= FTRACE_FL_FAILED;
	return failed;
}
static void notrace ftrace_replace_code(int enable)
{
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int i;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			__ftrace_replace_code(rec, old, new, enable);
		}
	}
}
static notrace void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}
static notrace void
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int failed;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, MCOUNT_ADDR);

	failed = ftrace_modify_code(ip, call, nop);
	if (failed)
		rec->flags |= FTRACE_FL_FAILED;
}
static int notrace __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}
static void notrace ftrace_run_update_code(int command)
{
	stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
}
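/*
 * stop_machine_run() executes __ftrace_modify_code() while every
 * other CPU spins with interrupts disabled, so no CPU can be
 * executing the instructions being rewritten.  That is what makes
 * patching live kernel text safe here without any per-call-site
 * synchronization.
 */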
static ftrace_func_t saved_ftrace_func;
static void notrace ftrace_startup(void)
{
	int command = 0;

	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}
static void notrace ftrace_shutdown(void)
{
	int command = 0;

	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (!ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}
static void notrace ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	mutex_lock(&ftraced_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftraced_suspend is true if we want ftrace running */
	if (ftraced_suspend)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}
static void notrace ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if ftrace is running */
	if (ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}
static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;
static int notrace __ftrace_update_code(void *ignore)
{
	struct dyn_ftrace *p;
	struct hlist_head head;
	struct hlist_node *t;
	int save_ftrace_enabled;
	cycle_t start, stop;
	int i;

	/* Don't be recording funcs now */
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		if (hlist_empty(&ftrace_hash[i]))
			continue;

		head = ftrace_hash[i];
		INIT_HLIST_HEAD(&ftrace_hash[i]);

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry(p, t, &head, node) {
			ftrace_code_disable(p);
			ftrace_update_cnt++;
		}
	}

	stop = now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	ftrace_enabled = save_ftrace_enabled;

	return 0;
}
static void notrace ftrace_update_code(void)
{
	stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
}
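/*
 * ftraced below is the update daemon: roughly once a second it runs
 * the stop_machine update above to convert newly recorded mcount
 * call sites into nops, then bumps ftraced_iteration_counter so that
 * waiters in ftrace_force_update() can tell a full pass completed.
 */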
static int notrace ftraced(void *ignore)
{
	unsigned long usecs;

	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		/* check once a second */
		schedule_timeout(HZ);

		mutex_lock(&ftrace_sysctl_lock);
		mutex_lock(&ftraced_lock);
		if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) {
			ftrace_record_suspend++;
			ftrace_update_code();
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				/* reset only after reporting the total */
				ftrace_update_tot_cnt = 0;
			}
			ftraced_trigger = 0;
			ftrace_record_suspend--;
		}
		ftraced_iteration_counter++;
		mutex_unlock(&ftraced_lock);
		mutex_unlock(&ftrace_sysctl_lock);

		wake_up_interruptible(&ftraced_waiters);

		ftrace_shutdown_replenish();

		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
static int __init ftrace_dyn_table_alloc(void)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *  final linking to find all calls to ftrace.
	 *  Then we can:
	 *   a) know how many pages to allocate.
	 *   b) set up the table then.
	 *
	 * The dynamic code is still necessary for
	 * modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = NR_TO_INIT / ENTRIES_PER_PAGE;

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	int			idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};
static void notrace *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FAILED) ||
		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER))) {
			rec = NULL;
			goto retry;
		}
	}

	iter->pos = *pos;

	return rec;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}
static void t_stop(struct seq_file *m, void *p)
{
}
static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}
static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
static int notrace
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}
int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}
static void notrace ftrace_filter_reset(void)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned i;

	/* keep kstop machine from running */
	preempt_disable();
	ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~FTRACE_FL_FILTER;
		}
		pg = pg->next;
	}
	preempt_enable();
}
static int notrace
ftrace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_filter_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset();

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = FTRACE_ITER_FILTER;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else {
			kfree(iter);
		}
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_filter_lock);

	return ret;
}
static ssize_t notrace
ftrace_filter_read(struct file *file, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);

	return -EPERM;
}
static loff_t notrace
ftrace_filter_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

static void notrace
ftrace_match(unsigned char *buff, int len)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned i, match = 0, search_len = 0;
	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
					search_len--;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				/* terminate the pattern at the wildcard */
				buff[i] = 0;
				break;
			}
		}
	}
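	/*
	 * Resulting match types, by example:
	 *
	 *	"sched"    MATCH_FULL         exact symbol name
	 *	"sched*"   MATCH_FRONT_ONLY   name starts with "sched"
	 *	"*lock"    MATCH_END_ONLY     name ends with "lock"
	 *	"*lock*"   MATCH_MIDDLE_ONLY  name contains "lock"
	 */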
	/* keep kstop machine from running */
	preempt_disable();
	ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= FTRACE_FL_FILTER;
		}
		pg = pg->next;
	}
	preempt_enable();
}
static ssize_t notrace
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt)
		return 0;

	mutex_lock(&ftrace_filter_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_filter_lock);

	return ret;
}
static int notrace
ftrace_filter_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_filter_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);
	if (iter->filtered && ftraced_suspend && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_filter_lock);
	return 0;
}
static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};
static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_filter_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_filter_lseek,
	.release = ftrace_filter_release,
};
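/*
 * Example usage from userspace (assuming debugfs is mounted at
 * /sys/kernel/debug, so the tracing directory created below is
 * /sys/kernel/debug/tracing):
 *
 *	# trace only scheduler functions
 *	echo 'sched*' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 *	# list everything that can be traced
 *	cat /sys/kernel/debug/tracing/available_filter_functions
 */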
/**
 * ftrace_force_update - force an update to all recording ftrace functions
 *
 * The ftrace dynamic update daemon only wakes up once a second.
 * There may be cases where an update needs to be done immediately
 * for tests or internal kernel tracing to begin. This function
 * wakes the daemon to do an update and will not return until the
 * update is complete.
 */
int ftrace_force_update(void)
{
	unsigned long last_counter;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	if (!ftraced_task)
		return -ENODEV;

	mutex_lock(&ftraced_lock);
	last_counter = ftraced_iteration_counter;

	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&ftraced_waiters, &wait);

	do {
		mutex_unlock(&ftraced_lock);
		wake_up_process(ftraced_task);
		schedule();
		mutex_lock(&ftraced_lock);
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		set_current_state(TASK_INTERRUPTIBLE);
	} while (last_counter == ftraced_iteration_counter);

	mutex_unlock(&ftraced_lock);
	remove_wait_queue(&ftraced_waiters, &wait);
	set_current_state(TASK_RUNNING);

	return ret;
}
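/*
 * The loop above is the classic wait-queue handshake: sample the
 * iteration counter, add ourselves to the wait queue, wake the
 * daemon, and sleep until the counter advances.  Because the counter
 * is sampled under ftraced_lock, a wakeup cannot be lost between the
 * check and the sleep.
 */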
static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	return 0;
}

fs_initcall(ftrace_init_debugfs);
static int __init notrace ftrace_dynamic_init(void)
{
	struct task_struct *p;
	unsigned long addr;
	int ret;

	addr = (unsigned long)ftrace_record_ip;
	stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		return addr;

	ret = ftrace_dyn_table_alloc();
	if (ret)
		return ret;

	p = kthread_run(ftraced, NULL, "ftraced");
	if (IS_ERR(p))
		return -1;

	last_ftrace_enabled = ftrace_enabled = 1;

	ftraced_task = p;

	return 0;
}

core_initcall(ftrace_dynamic_init);
#else /* !CONFIG_DYNAMIC_FTRACE */
# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
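/*
 * A minimal caller might look like this (sketch only; my_trace_func
 * and my_ops are illustrative names, not symbols defined anywhere in
 * the kernel):
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		...	(everything called from here must be notrace)
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly =
 *	{
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */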
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
notrace int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
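/*
 * Example: this handler backs the kernel.ftrace_enabled sysctl, so
 * tracing can be toggled from userspace with:
 *
 *	echo 0 > /proc/sys/kernel/ftrace_enabled
 *	echo 1 > /proc/sys/kernel/ftrace_enabled
 */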