tracing/events: clean up for ftrace_set_clr_event()
kernel/trace/trace_events.c
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/delay.h>

#include "trace_output.h"

#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);

int trace_define_field(struct ftrace_event_call *call, char *type,
		       char *name, int offset, int size, int is_signed)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;
	list_add(&field->link, &call->fields);

	return 0;

err:
	if (field) {
		kfree(field->name);
		kfree(field->type);
	}
	kfree(field);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(trace_define_field);

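/*
 * Usage sketch (not part of the original file): a tracepoint's
 * define_fields callback would typically describe each member of its
 * entry structure with trace_define_field().  The "my_event" names
 * below are hypothetical:
 *
 *	static int my_event_define_fields(void)
 *	{
 *		struct my_event_entry field;
 *
 *		return trace_define_field(&event_my_event, "int", "my_val",
 *					  offsetof(typeof(field), my_val),
 *					  sizeof(field.my_val), 1);
 *	}
 */
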
#ifdef CONFIG_MODULES

static void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;

	list_for_each_entry_safe(field, next, &call->fields, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}

#endif /* CONFIG_MODULES */

static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (call->enabled) {
			call->enabled = 0;
			call->unregfunc();
		}
	}
	mutex_unlock(&event_mutex);
}

static void ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{

	switch (enable) {
	case 0:
		if (call->enabled) {
			call->enabled = 0;
			call->unregfunc();
		}
		break;
	case 1:
		if (!call->enabled) {
			call->enabled = 1;
			call->regfunc();
		}
		break;
	}
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
{
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->regfunc)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)
			continue;

		if (sub && strcmp(sub, call->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}

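/*
 * Illustrative calls (not part of the original file) showing how the
 * three string arguments select events; the "sched", "sched_switch"
 * and "kmalloc" names are only assumed to exist for the example:
 *
 *	__ftrace_set_clr_event(NULL, "sched", NULL, 1);	enable whole subsystem
 *	__ftrace_set_clr_event(NULL, "sched", "sched_switch", 0); one event off
 *	__ftrace_set_clr_event("kmalloc", NULL, NULL, 1); match name or system
 *	__ftrace_set_clr_event(NULL, NULL, NULL, 0);	disable every event
 */
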
static int ftrace_set_clr_event(char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(match, sub, event, set);
}

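/*
 * Illustrative parse results (not part of the original file) for the
 * formats described above, assuming a hypothetical "sched" subsystem:
 *
 *	"sched:sched_switch"	-> sub = "sched", event = "sched_switch"
 *	"sched:*"		-> sub = "sched", event = NULL (whole subsystem)
 *	"*:sched_switch"	-> sub = NULL, event = "sched_switch"
 *	"sched_switch"		-> match = "sched_switch" (name or subsystem)
 */
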
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	size_t read = 0;
	int i, set = 1;
	ssize_t ret;
	char *buf;
	char ch;

	if (!cnt || cnt < 0)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = get_user(ch, ubuf++);
	if (ret)
		return ret;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			return ret;
		read++;
		cnt--;
	}

	/* Only white space found? */
	if (isspace(ch)) {
		file->f_pos += read;
		ret = read;
		return ret;
	}

	buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (cnt > EVENT_BUF_SIZE)
		cnt = EVENT_BUF_SIZE;

	i = 0;
	while (cnt && !isspace(ch)) {
		if (!i && ch == '!')
			set = 0;
		else
			buf[i++] = ch;

		ret = get_user(ch, ubuf++);
		if (ret)
			goto out_free;
		read++;
		cnt--;
	}
	buf[i] = 0;

	file->f_pos += read;

	ret = ftrace_set_clr_event(buf, set);
	if (ret)
		goto out_free;

	ret = read;

 out_free:
	kfree(buf);

	return ret;
}

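/*
 * Usage sketch (not part of the original file): this write handler
 * backs the debugfs "set_event" file created in event_trace_init()
 * below, so from user space something like
 *
 *	echo sched:sched_switch > /sys/kernel/debug/tracing/set_event
 *	echo '!sched:sched_switch' >> /sys/kernel/debug/tracing/set_event
 *
 * enables and then disables one event; the leading '!' is what flips
 * "set" to 0 above.  The "sched:sched_switch" name is assumed for the
 * example and the debugfs mount point may differ.
 */
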
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

	for (;;) {
		if (list == &ftrace_events)
			return NULL;

		call = list_entry(list, struct ftrace_event_call, list);

		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->regfunc)
			break;

		list = list->next;
	}

	m->private = list->next;

	return call;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&event_mutex);
	if (*pos == 0)
		m->private = ftrace_events.next;
	return t_next(m, NULL, pos);
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

 retry:
	if (list == &ftrace_events)
		return NULL;

	call = list_entry(list, struct ftrace_event_call, list);

	if (!call->enabled) {
		list = list->next;
		goto retry;
	}

	m->private = list->next;

	return call;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&event_mutex);
	if (*pos == 0)
		m->private = ftrace_events.next;
	return s_next(m, NULL, pos);
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->enabled)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}

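/*
 * Usage sketch (not part of the original file): these two handlers back
 * the per-event "enable" file created by event_create_dir() below, e.g.
 *
 *	echo 1 > /sys/kernel/debug/tracing/events/sched/sched_switch/enable
 *	cat /sys/kernel/debug/tracing/events/sched/sched_switch/enable
 *
 * The "sched/sched_switch" path is assumed for the example; only the
 * values 0 and 1 are accepted by the write handler.
 */
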
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char *system = filp->private_data;
	struct ftrace_event_call *call;
	char buf[2];
	int set = -1;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->regfunc)
			continue;

		if (system && strcmp(call->system, system) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		if (call->enabled) {
			switch (set) {
			case -1:
				set = 1;
				break;
			case 0:
				set = 2;
				break;
			}
		} else {
			switch (set) {
			case -1:
				set = 0;
				break;
			case 1:
				set = 2;
				break;
			}
		}
		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 2)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[1] = '\n';
	switch (set) {
	case 0:
		buf[0] = '0';
		break;
	case 1:
		buf[0] = '1';
		break;
	case 2:
		buf[0] = 'X';
		break;
	default:
		buf[0] = '?';
	}

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

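/*
 * Illustrative output (not part of the original file): reading a
 * subsystem "enable" file returns "1" when every event in the subsystem
 * is enabled, "0" when every event is disabled, and "X" for a mixture.
 */
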
static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	const char *system = filp->private_data;
	unsigned long val;
	char buf[64];
	ssize_t ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	ret = __ftrace_set_clr_event(NULL, system, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}

extern char *__bad_type_size(void);

#undef FIELD
#define FIELD(type, name)						\
	sizeof(type) != sizeof(field.name) ? __bad_type_size() :	\
	#type, "common_" #name, offsetof(typeof(field), name),		\
	sizeof(field.name)

static int trace_write_header(struct trace_seq *s)
{
	struct trace_entry field;

	/* struct trace_entry */
	return trace_seq_printf(s,
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\n",
				FIELD(unsigned short, type),
				FIELD(unsigned char, flags),
				FIELD(unsigned char, preempt_count),
				FIELD(int, pid),
				FIELD(int, tgid));
}

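/*
 * Illustrative output (not part of the original file): with the usual
 * layout of struct trace_entry (type, flags, preempt_count, pid, tgid),
 * the common header written above looks roughly like
 *
 *	field:unsigned short common_type;	offset:0;	size:2;
 *	field:unsigned char common_flags;	offset:2;	size:1;
 *	field:unsigned char common_preempt_count;	offset:3;	size:1;
 *	field:int common_pid;	offset:4;	size:4;
 *	field:int common_tgid;	offset:8;	size:4;
 *
 * The exact offsets depend on the architecture and configuration.
 */
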
static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	char *buf;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	/* If any of the first writes fail, so will the show_format. */

	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");
	trace_write_header(s);

	r = call->show_format(s);
	if (!r) {
		/*
		 * ug! The format output is bigger than a PAGE!!
		 */
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    buf, strlen(buf));
		goto out;
	}

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
 out:
	kfree(s);
	return r;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = tracing_open_generic,
	.read = system_enable_read,
	.write = system_enable_write,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
};

static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}

static LIST_HEAD(event_subsystems);

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			return system->entry;
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	entry = trace_create_file("enable", 0644, system->entry,
				  (void *)system->name,
				  &ftrace_system_enable_fops);

	return system->entry;
}

static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct dentry *entry;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	if (call->raw_init) {
		ret = call->raw_init();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
	}

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->regfunc)
		entry = trace_create_file("enable", 0644, call->dir, call,
					  enable);

	if (call->id)
		entry = trace_create_file("id", 0444, call->dir, call,
					  id);

	if (call->define_fields) {
		ret = call->define_fields();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
		entry = trace_create_file("filter", 0644, call->dir, call,
					  filter);
	}

	/* A trace may not want to export its format */
	if (!call->show_format)
		return 0;

	entry = trace_create_file("format", 0444, call->dir, call,
				  format);

	return 0;
}

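/*
 * Resulting layout sketch (not part of the original file): for an event
 * in a subsystem, the directories and files created above end up under
 * the tracing debugfs root roughly as
 *
 *	events/<subsystem>/filter
 *	events/<subsystem>/enable
 *	events/<subsystem>/<event>/enable	(if the event has a regfunc)
 *	events/<subsystem>/<event>/id		(if the event has an id)
 *	events/<subsystem>/<event>/filter	(if it defines fields)
 *	events/<subsystem>/<event>/format	(if it exports a format)
 */
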
#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}

static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call, *start, *end;
	struct dentry *d_events;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	d_events = event_trace_events_dir();
	if (!d_events)
		return;

	for_each_event(call, start, end) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;

		/*
		 * This module has events, create file ops for this module
		 * if not already done.
		 */
		if (!file_ops) {
			file_ops = trace_create_file_ops(mod);
			if (!file_ops)
				return;
		}
		call->mod = mod;
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events,
				 &file_ops->id, &file_ops->enable,
				 &file_ops->filter, &file_ops->format);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			if (call->enabled) {
				call->enabled = 0;
				call->unregfunc();
			}
			if (call->event)
				unregister_ftrace_event(call->event);
			debugfs_remove_recursive(call->dir);
			list_del(&call->list);
			trace_destroy_fields(call);
			destroy_preds(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

static __init int event_trace_init(void)
{
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  NULL, &ftrace_system_enable_fops);

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events, &ftrace_event_id_fops,
				 &ftrace_enable_fops, &ftrace_event_filter_fops,
				 &ftrace_event_format_fops);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a regfunc */
		if (!call->regfunc)
			continue;

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->enabled) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		call->enabled = 1;
		call->regfunc();

		event_test_stuff();

		call->unregfunc();
		call->enabled = 0;

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* reset sysname */
	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(event, flags, pc);

 out:
	atomic_dec(&per_cpu(test_event_disable, cpu));
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{

	event_trace_self_tests();

	event_trace_self_test_with_function();

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif