kernel/trace/trace_events.c
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>

#include "trace_output.h"

#define TRACE_SYSTEM "TRACE_SYSTEM"

static DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);

int trace_define_field(struct ftrace_event_call *call, char *type,
		       char *name, int offset, int size)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	field->offset = offset;
	field->size = size;
	list_add(&field->link, &call->fields);

	return 0;

err:
	if (field) {
		kfree(field->name);
		kfree(field->type);
	}
	kfree(field);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(trace_define_field);
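
/*
 * A define_fields() callback registers each field of its event entry by
 * passing the textual type and name plus the field's offset and size.
 * A hypothetical call (the entry structure and field name below are
 * illustrative only, not taken from this file) would look like:
 *
 *	trace_define_field(call, "unsigned long", "ip",
 *			   offsetof(struct ftrace_entry, ip),
 *			   sizeof(unsigned long));
 */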

static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	list_for_each_entry(call, &ftrace_events, list) {

		if (call->enabled) {
			call->enabled = 0;
			call->unregfunc();
		}
	}
}

static void ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{

	switch (enable) {
	case 0:
		if (call->enabled) {
			call->enabled = 0;
			call->unregfunc();
		}
		break;
	case 1:
		if (!call->enabled) {
			call->enabled = 1;
			call->regfunc();
		}
		break;
	}
}

static int ftrace_set_clr_event(char *buf, int set)
{
	struct ftrace_event_call *call;
	char *event = NULL, *sub = NULL, *match;
	int ret = -EINVAL;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */
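
	/*
	 * For example (the subsystem and event names here are hypothetical
	 * and only illustrate the syntax): "sched:sched_switch" selects a
	 * single event, "sched:" or "sched:*" selects every event in the
	 * sched subsystem, and a bare "sched_switch" matches any event or
	 * subsystem with that name.
	 */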

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->regfunc)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)
			continue;

		if (sub && strcmp(sub, call->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	size_t read = 0;
	int i, set = 1;
	ssize_t ret;
	char *buf;
	char ch;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = get_user(ch, ubuf++);
	if (ret)
		return ret;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			return ret;
		read++;
		cnt--;
	}

	/* Only white space found? */
	if (isspace(ch)) {
		file->f_pos += read;
		ret = read;
		return ret;
	}

	buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (cnt > EVENT_BUF_SIZE)
		cnt = EVENT_BUF_SIZE;

	i = 0;
	while (cnt && !isspace(ch)) {
		if (!i && ch == '!')
			set = 0;
		else
			buf[i++] = ch;

		ret = get_user(ch, ubuf++);
		if (ret)
			goto out_free;
		read++;
		cnt--;
	}
	buf[i] = 0;

	file->f_pos += read;

	ret = ftrace_set_clr_event(buf, set);
	if (ret)
		goto out_free;

	ret = read;

 out_free:
	kfree(buf);

	return ret;
}
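
/*
 * The set_event file created in event_trace_init() is written through this
 * handler; a leading '!' on a word clears the matched events instead of
 * setting them. A hypothetical shell example (event names are illustrative
 * only):
 *
 *	echo 'irq:irq_handler_entry' >> set_event	# enable one event
 *	echo '!irq:*' >> set_event			# '!' disables
 *
 * A plain '>' redirect first clears the current set, since the open path
 * (ftrace_event_seq_open) calls ftrace_clear_events() for writes without
 * O_APPEND.
 */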

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

	for (;;) {
		if (list == &ftrace_events)
			return NULL;

		call = list_entry(list, struct ftrace_event_call, list);

		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->regfunc)
			break;

		list = list->next;
	}

	m->private = list->next;

	return call;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	return t_next(m, NULL, pos);
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

 retry:
	if (list == &ftrace_events)
		return NULL;

	call = list_entry(list, struct ftrace_event_call, list);

	if (!call->enabled) {
		list = list->next;
		goto retry;
	}

	m->private = list->next;

	return call;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	return s_next(m, NULL, pos);
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	int ret;
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	ret = seq_open(file, seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = ftrace_events.next;
	}
	return ret;
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->enabled)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}

#undef FIELD
#define FIELD(type, name)						\
	#type, "common_" #name, offsetof(typeof(field), name),		\
		sizeof(field.name)

static int trace_write_header(struct trace_seq *s)
{
	struct trace_entry field;

	/* struct trace_entry */
	return trace_seq_printf(s,
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\n",
				FIELD(unsigned char, type),
				FIELD(unsigned char, flags),
				FIELD(unsigned char, preempt_count),
				FIELD(int, pid),
				FIELD(int, tgid));
}
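
/*
 * This emits the common-field block at the top of each "format" file.
 * On a typical build the output looks roughly like the lines below; the
 * exact offsets and sizes depend on the layout of struct trace_entry for
 * the running kernel, so the numbers here are only an illustration:
 *
 *	field:unsigned char type;	offset:0;	size:1;
 *	field:unsigned char flags;	offset:1;	size:1;
 *	field:unsigned char preempt_count;	offset:2;	size:1;
 *	field:int pid;	offset:4;	size:4;
 *	field:int tgid;	offset:8;	size:4;
 */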

static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	char *buf;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	/* If any of the first writes fail, so will the show_format. */

	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");
	trace_write_header(s);

	r = call->show_format(s);
	if (!r) {
		/*
		 * ug! The format output is bigger than a PAGE!!
		 */
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    buf, strlen(buf));
		goto out;
	}

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
 out:
	kfree(s);
	return r;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	filter_print_preds(call->preds, call->n_preds, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64], *pbuf = buf;
	struct filter_pred *pred;
	int err;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;
	buf[cnt] = '\0';

	pred = kzalloc(sizeof(*pred), GFP_KERNEL);
	if (!pred)
		return -ENOMEM;

	err = filter_parse(&pbuf, pred);
	if (err < 0) {
		filter_free_pred(pred);
		return err;
	}

	if (pred->clear) {
		filter_disable_preds(call);
		filter_free_pred(pred);
		return cnt;
	}

	err = filter_add_pred(call, pred);
	if (err < 0) {
		filter_free_pred(pred);
		return err;
	}

	filter_free_pred(pred);

	*ppos += cnt;

	return cnt;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	filter_print_preds(system->preds, system->n_preds, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char buf[64], *pbuf = buf;
	struct filter_pred *pred;
	int err;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;
	buf[cnt] = '\0';

	pred = kzalloc(sizeof(*pred), GFP_KERNEL);
	if (!pred)
		return -ENOMEM;

	err = filter_parse(&pbuf, pred);
	if (err < 0) {
		filter_free_pred(pred);
		return err;
	}

	if (pred->clear) {
		filter_free_subsystem_preds(system);
		filter_free_pred(pred);
		return cnt;
	}

	err = filter_add_subsystem_pred(system, pred);
	if (err < 0) {
		filter_free_subsystem_preds(system);
		filter_free_pred(pred);
		return err;
	}

	*ppos += cnt;

	return cnt;
}

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
};

static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}

static LIST_HEAD(event_subsystems);

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			return system->entry;
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->preds = NULL;
	system->n_preds = 0;

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);

	return system->entry;
}

static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events)
{
	struct dentry *entry;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	if (call->raw_init) {
		ret = call->raw_init();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
	}

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->regfunc)
		entry = trace_create_file("enable", 0644, call->dir, call,
					  &ftrace_enable_fops);

	if (call->id)
		entry = trace_create_file("id", 0444, call->dir, call,
					  &ftrace_event_id_fops);

	if (call->define_fields) {
		ret = call->define_fields();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
		entry = trace_create_file("filter", 0644, call->dir, call,
					  &ftrace_event_filter_fops);
	}

	/* A trace may not want to export its format */
	if (!call->show_format)
		return 0;

	entry = trace_create_file("format", 0444, call->dir, call,
				  &ftrace_event_format_fops);

	return 0;
}
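
/*
 * The net effect of the helpers above is a per-event directory under the
 * tracing debugfs tree (commonly mounted at /sys/kernel/debug/tracing):
 * events/<subsystem>/<event>/ containing "enable", "id", "filter" and
 * "format", each file created only when the event supplies the
 * corresponding callback or id.
 */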

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES
static void trace_module_add_events(struct module *mod)
{
	struct ftrace_event_call *call, *start, *end;
	struct dentry *d_events;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	d_events = event_trace_events_dir();
	if (!d_events)
		return;

	for_each_event(call, start, end) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		call->mod = mod;
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_event_call *call, *p;

	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			if (call->enabled) {
				call->enabled = 0;
				call->unregfunc();
			}
			if (call->event)
				unregister_ftrace_event(call->event);
			debugfs_remove_recursive(call->dir);
			list_del(&call->list);
		}
	}
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

static __init int event_trace_init(void)
{
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);