/*
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 */
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/delay.h>

#include "trace_output.h"

#define TRACE_SYSTEM "TRACE_SYSTEM"
static DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
int trace_define_field(struct ftrace_event_call *call, char *type,
		       char *name, int offset, int size, int is_signed)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	/* ... */

	field->name = kstrdup(name, GFP_KERNEL);
	/* ... */

	field->type = kstrdup(type, GFP_KERNEL);
	/* ... */

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;
	list_add(&field->link, &call->fields);

	return 0;
}
EXPORT_SYMBOL_GPL(trace_define_field);
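/*
 * Illustrative only: an event's define_fields() callback would describe its
 * payload with calls along these lines (the entry struct and field names
 * here are hypothetical, not from this file):
 *
 *	trace_define_field(call, "unsigned long", "ip",
 *			   offsetof(struct my_entry, ip),
 *			   sizeof(unsigned long), 0);
 *	trace_define_field(call, "int", "prio",
 *			   offsetof(struct my_entry, prio),
 *			   sizeof(int), 1);
 *
 * The field list built here is what the event filter code walks when it
 * parses expressions written to the "filter" files.
 */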
#ifdef CONFIG_MODULES

static void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;

	list_for_each_entry_safe(field, next, &call->fields, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}

#endif /* CONFIG_MODULES */
static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	list_for_each_entry(call, &ftrace_events, list) {

		if (call->enabled) {
			call->enabled = 0;
			call->unregfunc();
		}
	}
}
static void ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{
	/* ... */
	if (!call->enabled) {
		call->enabled = 1;
		call->regfunc();
	}
	/* ... */
}
static int ftrace_set_clr_event(char *buf, int set)
{
	struct ftrace_event_call *call;
	char *event = NULL, *sub = NULL, *match;
	int ret = -EINVAL;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->regfunc)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)
			continue;

		if (sub && strcmp(sub, call->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}
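/*
 * Illustrative usage (paths assume debugfs is mounted at /sys/kernel/debug):
 *
 *	echo sched:sched_switch > /sys/kernel/debug/tracing/set_event
 *	echo 'irq:*'            > /sys/kernel/debug/tracing/set_event
 *	echo sched_switch       > /sys/kernel/debug/tracing/set_event
 *
 * Each write reaches ftrace_set_clr_event() as "<subsystem>:<event>",
 * "<subsystem>:*" or a bare name; the event names above are examples only.
 */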
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	ssize_t ret;
	int set = 1;
	char *buf;
	char ch;
	/* ... */

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = get_user(ch, ubuf++);
	if (ret)
		return ret;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			return ret;
		cnt--;
	}

	/* Only white space found? */
	/* ... */

	buf = kmalloc(EVENT_BUF_SIZE + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (cnt > EVENT_BUF_SIZE)
		cnt = EVENT_BUF_SIZE;

	while (cnt && !isspace(ch)) {
		/* ... */
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out_free;
		cnt--;
	}
	/* ... */

	ret = ftrace_set_clr_event(buf, set);

 out_free:
	kfree(buf);
	/* ... */
	return ret;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

	if (list == &ftrace_events)
		return NULL;

	call = list_entry(list, struct ftrace_event_call, list);

	/*
	 * The ftrace subsystem is for showing formats only.
	 * They cannot be enabled or disabled via the event files.
	 */
	/* ... */

	m->private = list->next;

	return call;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	return t_next(m, NULL, pos);
}
static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

 retry:
	if (list == &ftrace_events)
		return NULL;

	call = list_entry(list, struct ftrace_event_call, list);

	if (!call->enabled) {
		list = list->next;
		goto retry;
	}

	m->private = list->next;

	return call;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	return s_next(m, NULL, pos);
}
static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
}
static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	int ret;
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	ret = seq_open(file, seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = ftrace_events.next;
	}
	return ret;
}
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->enabled)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}
static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	/* ... */
	mutex_lock(&event_mutex);
	ftrace_event_enable_disable(call, val);
	mutex_unlock(&event_mutex);

	/* ... */
	return cnt;
}
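/*
 * Illustrative usage of the per-event "enable" file created below in
 * event_create_dir() (the event name is an example only):
 *
 *	echo 1 > /sys/kernel/debug/tracing/events/sched/sched_switch/enable
 *	echo 0 > /sys/kernel/debug/tracing/events/sched/sched_switch/enable
 *
 * Writing 1 registers the tracepoint via call->regfunc(); writing 0
 * unregisters it via call->unregfunc().
 */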
extern char *__bad_type_size(void);

#define FIELD(type, name)						\
	sizeof(type) != sizeof(field.name) ? __bad_type_size() :	\
	#type, "common_" #name, offsetof(typeof(field), name),		\
	sizeof(field.name)
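/*
 * For reference: FIELD(unsigned short, type) expands (roughly) to the four
 * printf arguments
 *
 *	"unsigned short", "common_type",
 *	offsetof(struct trace_entry, type), sizeof(field.type)
 *
 * which fill one "field:%s %s; offset:%zu; size:%zu;" line in
 * trace_write_header() below.  The sizeof() comparison against the declared
 * type turns a mismatch into a link error via the undefined
 * __bad_type_size().
 */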
static int trace_write_header(struct trace_seq *s)
{
	struct trace_entry field;

	/* struct trace_entry */
	return trace_seq_printf(s,
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\n",
				FIELD(unsigned short, type),
				FIELD(unsigned char, flags),
				FIELD(unsigned char, preempt_count),
				FIELD(int, pid),
				FIELD(int, tgid));
}
static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	char *buf;
	int r;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	/* If any of the first writes fail, so will the show_format. */

	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");
	trace_write_header(s);

	r = call->show_format(s);
	if (!r) {
		/*
		 * ug! The format output is bigger than a PAGE!!
		 */
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    buf, strlen(buf));
		goto out;
	}

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
 out:
	kfree(s);
	return r;
}
static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}
static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);
	return r;
}
static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;
	return cnt;
}
static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);
	return r;
}
static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;
	return cnt;
}
static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);
	return r;
}
static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
};
static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}
static LIST_HEAD(event_subsystems);
static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			return system->entry;
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	return system->entry;
}
static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct dentry *entry;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	if (call->raw_init) {
		ret = call->raw_init();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
	}

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	entry = trace_create_file("enable", 0644, call->dir, call,
				  enable);

	entry = trace_create_file("id", 0444, call->dir, call,
				  id);

	if (call->define_fields) {
		ret = call->define_fields();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
		entry = trace_create_file("filter", 0644, call->dir, call,
					  filter);
	}

	/* A trace may not want to export its format */
	if (!call->show_format)
		return 0;

	entry = trace_create_file("format", 0444, call->dir, call,
				  format);

	return 0;
}
#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)
#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};
static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}
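/*
 * Setting .owner on each copied file_operations ties open files to the
 * module: the VFS takes a reference on the owning module while one of
 * these files is open, so the module cannot be unloaded out from under
 * a reader or writer.
 */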
static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call, *start, *end;
	struct dentry *d_events;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;
	/* ... */

	d_events = event_trace_events_dir();
	if (!d_events)
		return;

	for_each_event(call, start, end) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;

		/*
		 * This module has events, create file ops for this module
		 * if not already done.
		 */
		if (!file_ops) {
			file_ops = trace_create_file_ops(mod);
			if (!file_ops)
				return;
		}
		call->mod = mod;
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events,
				 &file_ops->id, &file_ops->enable,
				 &file_ops->filter, &file_ops->format);
	}
}
static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;

	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			/* ... */
			unregister_ftrace_event(call->event);
			debugfs_remove_recursive(call->dir);
			list_del(&call->list);
			trace_destroy_fields(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}
}
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else /* !CONFIG_MODULES */
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */
struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
};

extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];
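/*
 * __start_ftrace_events and __stop_ftrace_events are linker-provided
 * symbols that bracket the section into which the event macros place their
 * struct ftrace_event_call instances; for_each_event() below simply walks
 * that array.  Modules carry the same information in mod->trace_events and
 * mod->num_trace_events.
 */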
static __init int event_trace_init(void)
{
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events, &ftrace_event_id_fops,
				 &ftrace_enable_fops, &ftrace_event_filter_fops,
				 &ftrace_event_format_fops);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);
#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);
static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	/* ... */
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	/* ... */
	mutex_unlock(&test_mutex);
}
static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}
/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	/* ... */
	kthread_stop(test_thread);
}
/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	char *sysname;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a regfunc */
		if (!call->regfunc)
			continue;

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->enabled) {
			pr_warning("Enabled event during self test!\n");
			continue;
		}

		/* ... */
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		/* ftrace_set_clr_event can modify the name passed in. */
		sysname = kstrdup(system->name, GFP_KERNEL);
		if (WARN_ON(!sysname)) {
			pr_warning("Can't allocate memory, giving up!\n");
			return;
		}
		ret = ftrace_set_clr_event(sysname, 1);
		kfree(sysname);

		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		sysname = kstrdup(system->name, GFP_KERNEL);
		if (WARN_ON(!sysname)) {
			pr_warning("Can't allocate memory, giving up!\n");
			return;
		}
		ret = ftrace_set_clr_event(sysname, 0);
		kfree(sysname);

		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	sysname = kmalloc(4, GFP_KERNEL);
	if (WARN_ON(!sysname)) {
		pr_warning("Can't allocate memory, giving up!\n");
		return;
	}

	memcpy(sysname, "*:*", 4);
	ret = ftrace_set_clr_event(sysname, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	memcpy(sysname, "*:*", 4);
	ret = ftrace_set_clr_event(sysname, 0);
	kfree(sysname);

	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}
}
#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(event, flags, pc);

 out:
	atomic_dec(&per_cpu(test_event_disable, cpu));
	ftrace_preempt_enable(resched);
}
static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif /* CONFIG_FUNCTION_TRACER */

static __init int event_trace_self_tests_init(void)
{
	event_trace_self_tests();

	event_trace_self_test_with_function();

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif /* CONFIG_FTRACE_STARTUP_TEST */