tracing/events: fix memory leak when unloading module
kernel/trace/trace_events.c
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/delay.h>

#include "trace_output.h"

#define TRACE_SYSTEM "TRACE_SYSTEM"

static DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);

int trace_define_field(struct ftrace_event_call *call, char *type,
		       char *name, int offset, int size, int is_signed)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;
	list_add(&field->link, &call->fields);

	return 0;

err:
	if (field) {
		kfree(field->name);
		kfree(field->type);
	}
	kfree(field);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(trace_define_field);

#ifdef CONFIG_MODULES

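/*
 * Release the field list built up by trace_define_field(). Without
 * this, the kstrdup()'d name/type strings and the field structures
 * themselves would leak when a module's events are torn down.
 */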
static void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;

	list_for_each_entry_safe(field, next, &call->fields, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}

#endif /* CONFIG_MODULES */

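/* Disable every event that is currently enabled. */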
static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	list_for_each_entry(call, &ftrace_events, list) {

		if (call->enabled) {
			call->enabled = 0;
			call->unregfunc();
		}
	}
}

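/* Callers must hold event_mutex. */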
static void ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{
	switch (enable) {
	case 0:
		if (call->enabled) {
			call->enabled = 0;
			call->unregfunc();
		}
		break;
	case 1:
		if (!call->enabled) {
			call->enabled = 1;
			call->regfunc();
		}
		break;
	}
}

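/*
 * Enable (set == 1) or disable (set == 0) all events matching buf.
 * Note that strsep() below modifies buf in place. Returns 0 if at
 * least one event matched, -EINVAL if none did.
 */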
static int ftrace_set_clr_event(char *buf, int set)
{
	struct ftrace_event_call *call;
	char *event = NULL, *sub = NULL, *match;
	int ret = -EINVAL;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->regfunc)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)
			continue;

		if (sub && strcmp(sub, call->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}

/* 127 chars plus the terminating NUL; should be much more than enough */
#define EVENT_BUF_SIZE 127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	size_t read = 0;
	int i, set = 1;
	ssize_t ret;
	char *buf;
	char ch;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = get_user(ch, ubuf++);
	if (ret)
		return ret;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			return ret;
		read++;
		cnt--;
	}

	/* Only white space found? */
	if (isspace(ch)) {
		file->f_pos += read;
		ret = read;
		return ret;
	}

	buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (cnt > EVENT_BUF_SIZE)
		cnt = EVENT_BUF_SIZE;

	i = 0;
	while (cnt && !isspace(ch)) {
		if (!i && ch == '!')
			set = 0;
		else
			buf[i++] = ch;

		ret = get_user(ch, ubuf++);
		if (ret)
			goto out_free;
		read++;
		cnt--;
	}
	buf[i] = 0;

	file->f_pos += read;

	ret = ftrace_set_clr_event(buf, set);
	if (ret)
		goto out_free;

	ret = read;

 out_free:
	kfree(buf);

	return ret;
}

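/*
 * Iterator for available_events: walk ftrace_events but skip entries
 * without a regfunc, since those cannot be enabled anyway.
 */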
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

	for (;;) {
		if (list == &ftrace_events)
			return NULL;

		call = list_entry(list, struct ftrace_event_call, list);

		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->regfunc)
			break;

		list = list->next;
	}

	m->private = list->next;

	return call;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	return t_next(m, NULL, pos);
}

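/* Iterator for set_event: show only the events that are enabled. */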
static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

 retry:
	if (list == &ftrace_events)
		return NULL;

	call = list_entry(list, struct ftrace_event_call, list);

	if (!call->enabled) {
		list = list->next;
		goto retry;
	}

	m->private = list->next;

	return call;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	return s_next(m, NULL, pos);
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
}

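/*
 * A write-mode open without O_APPEND acts as a truncate: clear the
 * current event selection before letting writes add to it.
 */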
static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	int ret;
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	ret = seq_open(file, seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = ftrace_events.next;
	}
	return ret;
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->enabled)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}

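/*
 * __bad_type_size() is never defined, so a size mismatch in the
 * FIELD() macro below becomes a link-time error rather than a
 * silently wrong format description.
 */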
extern char *__bad_type_size(void);

#undef FIELD
#define FIELD(type, name)						\
	sizeof(type) != sizeof(field.name) ? __bad_type_size() :	\
	#type, "common_" #name, offsetof(typeof(field), name),		\
	sizeof(field.name)

static int trace_write_header(struct trace_seq *s)
{
	struct trace_entry field;

	/* struct trace_entry */
	return trace_seq_printf(s,
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\n",
				FIELD(unsigned short, type),
				FIELD(unsigned char, flags),
				FIELD(unsigned char, preempt_count),
				FIELD(int, pid),
				FIELD(int, tgid));
}

static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	char *buf;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	/* If any of the first writes fail, so will the show_format. */

	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");
	trace_write_header(s);

	r = call->show_format(s);
	if (!r) {
		/*
		 * ug! The format output is bigger than a PAGE!!
		 */
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    buf, strlen(buf));
		goto out;
	}

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
 out:
	kfree(s);
	return r;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
};

static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}

static LIST_HEAD(event_subsystems);

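/*
 * Find or create the debugfs directory for a subsystem. On failure
 * we fall back to placing the events directly under d_events.
 */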
static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			return system->entry;
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	return system->entry;
}

static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct dentry *entry;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	if (call->raw_init) {
		ret = call->raw_init();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
	}

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->regfunc)
		entry = trace_create_file("enable", 0644, call->dir, call,
					  enable);

	if (call->id)
		entry = trace_create_file("id", 0444, call->dir, call,
					  id);

	if (call->define_fields) {
		ret = call->define_fields();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
		entry = trace_create_file("filter", 0644, call->dir, call,
					  filter);
	}

	/* A trace may not want to export its format */
	if (!call->show_format)
		return 0;

	entry = trace_create_file("format", 0444, call->dir, call,
				  format);

	return 0;
}

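/*
 * Iterate an array of ftrace_event_call entries between two addresses:
 * the core kernel's linker section or a module's trace_events array.
 */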
#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head	list;
	struct module		*mod;
	struct file_operations	id;
	struct file_operations	enable;
	struct file_operations	format;
	struct file_operations	filter;
};

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}

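/* Hook up a freshly loaded module's events and their debugfs files. */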
static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call, *start, *end;
	struct dentry *d_events;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	d_events = event_trace_events_dir();
	if (!d_events)
		return;

	for_each_event(call, start, end) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;

		/*
		 * This module has events, create file ops for this module
		 * if not already done.
		 */
		if (!file_ops) {
			file_ops = trace_create_file_ops(mod);
			if (!file_ops)
				return;
		}
		call->mod = mod;
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events,
				 &file_ops->id, &file_ops->enable,
				 &file_ops->filter, &file_ops->format);
	}
}

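/*
 * Undo trace_module_add_events(): disable each of the module's events,
 * unregister its output, remove its debugfs directory, and free its
 * fields and filter predicates so nothing leaks on unload.
 */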
static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;

	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			if (call->enabled) {
				call->enabled = 0;
				call->unregfunc();
			}
			if (call->event)
				unregister_ftrace_event(call->event);
			debugfs_remove_recursive(call->dir);
			list_del(&call->list);
			trace_destroy_fields(call);
			destroy_preds(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

static __init int event_trace_init(void)
{
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events, &ftrace_event_id_fops,
				 &ftrace_enable_fops, &ftrace_event_filter_fops,
				 &ftrace_event_format_fops);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	char *sysname;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a regfunc */
		if (!call->regfunc)
			continue;

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->enabled) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		call->enabled = 1;
		call->regfunc();

		event_test_stuff();

		call->unregfunc();
		call->enabled = 0;

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		/* ftrace_set_clr_event can modify the name passed in. */
		sysname = kstrdup(system->name, GFP_KERNEL);
		if (WARN_ON(!sysname)) {
			pr_warning("Can't allocate memory, giving up!\n");
			return;
		}
		ret = ftrace_set_clr_event(sysname, 1);
		kfree(sysname);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		sysname = kstrdup(system->name, GFP_KERNEL);
		if (WARN_ON(!sysname)) {
			pr_warning("Can't allocate memory, giving up!\n");
			return;
		}
		ret = ftrace_set_clr_event(sysname, 0);
		kfree(sysname);

		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	sysname = kmalloc(4, GFP_KERNEL);
	if (WARN_ON(!sysname)) {
		pr_warning("Can't allocate memory, giving up!\n");
		return;
	}
	memcpy(sysname, "*:*", 4);
	ret = ftrace_set_clr_event(sysname, 1);
	if (WARN_ON_ONCE(ret)) {
		kfree(sysname);
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* reset sysname */
	memcpy(sysname, "*:*", 4);
	ret = ftrace_set_clr_event(sysname, 0);
	kfree(sysname);

	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, test_event_disable);

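/*
 * Function tracer callback used while the self tests are rerun. The
 * per-cpu test_event_disable counter keeps the callback from recursing
 * into itself.
 */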
static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(event, flags, pc);

 out:
	atomic_dec(&per_cpu(test_event_disable, cpu));
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
	event_trace_self_tests();

	event_trace_self_test_with_function();

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif