kernel/trace/trace_events.c
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/delay.h>

#include "trace_output.h"

#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);

int trace_define_field(struct ftrace_event_call *call, char *type,
		       char *name, int offset, int size, int is_signed)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;
	list_add(&field->link, &call->fields);

	return 0;

err:
	if (field) {
		kfree(field->name);
		kfree(field->type);
	}
	kfree(field);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(trace_define_field);

#ifdef CONFIG_MODULES

static void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;

	list_for_each_entry_safe(field, next, &call->fields, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}

#endif /* CONFIG_MODULES */

static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (call->enabled) {
			call->enabled = 0;
			call->unregfunc();
		}
	}
	mutex_unlock(&event_mutex);
}

static void ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{

	switch (enable) {
	case 0:
		if (call->enabled) {
			call->enabled = 0;
			call->unregfunc();
		}
		break;
	case 1:
		if (!call->enabled) {
			call->enabled = 1;
			call->regfunc();
		}
		break;
	}
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
{
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->regfunc)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)
			continue;

		if (sub && strcmp(sub, call->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}

static int ftrace_set_clr_event(char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */
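	/*
	 * For example, via the tracing debugfs "set_event" file
	 * (typically /sys/kernel/debug/tracing/set_event; the event
	 * names below are purely illustrative):
	 *
	 *	echo sched:sched_switch > set_event
	 *	echo 'irq:*' >> set_event
	 *	echo '!sched:sched_switch' >> set_event
	 *
	 * Note that opening set_event for writing without O_APPEND
	 * (shell '>' rather than '>>') first clears every enabled
	 * event; see ftrace_event_seq_open().
	 */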

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(match, sub, event, set);
}

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	return __ftrace_set_clr_event(NULL, system, event, set);
}
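/*
 * Illustrative use only (the event and subsystem names here are
 * hypothetical for this example): another part of the kernel could
 * turn a single event on with
 *
 *	trace_set_clr_event("sched", "sched_wakeup", 1);
 *
 * and turn the whole "sched" subsystem back off with
 *
 *	trace_set_clr_event("sched", NULL, 0);
 */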

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	size_t read = 0;
	int i, set = 1;
	ssize_t ret;
	char *buf;
	char ch;

	if (!cnt || cnt < 0)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = get_user(ch, ubuf++);
	if (ret)
		return ret;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			return ret;
		read++;
		cnt--;
	}

	/* Only white space found? */
	if (isspace(ch)) {
		file->f_pos += read;
		ret = read;
		return ret;
	}

	buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (cnt > EVENT_BUF_SIZE)
		cnt = EVENT_BUF_SIZE;

	i = 0;
	while (cnt && !isspace(ch)) {
		if (!i && ch == '!')
			set = 0;
		else
			buf[i++] = ch;

		ret = get_user(ch, ubuf++);
		if (ret)
			goto out_free;
		read++;
		cnt--;
	}
	buf[i] = 0;

	file->f_pos += read;

	ret = ftrace_set_clr_event(buf, set);
	if (ret)
		goto out_free;

	ret = read;

 out_free:
	kfree(buf);

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

	for (;;) {
		if (list == &ftrace_events)
			return NULL;

		call = list_entry(list, struct ftrace_event_call, list);

		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->regfunc)
			break;

		list = list->next;
	}

	m->private = list->next;

	return call;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&event_mutex);
	if (*pos == 0)
		m->private = ftrace_events.next;
	return t_next(m, NULL, pos);
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

 retry:
	if (list == &ftrace_events)
		return NULL;

	call = list_entry(list, struct ftrace_event_call, list);

	if (!call->enabled) {
		list = list->next;
		goto retry;
	}

	m->private = list->next;

	return call;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&event_mutex);
	if (*pos == 0)
		m->private = ftrace_events.next;
	return s_next(m, NULL, pos);
}
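/*
 * Note: the t_* iterators above back the "available_events" file
 * (every registered event that has a regfunc), while the s_*
 * iterators back reads of "set_event" (only events that are
 * currently enabled); see how the two seq_operations are wired up
 * in event_trace_init() below.
 */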

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->enabled)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	const char *system = filp->private_data;
	struct ftrace_event_call *call;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->regfunc)
			continue;

		if (system && strcmp(call->system, system) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!call->enabled);

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

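	/*
	 * At this point "set" indexes set_to_char: 0 means no matching
	 * events were found ('?'), 1 means all matching events are
	 * disabled ('0'), 2 means all are enabled ('1'), and 3 is a
	 * mixture ('X').
	 */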
	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	const char *system = filp->private_data;
	unsigned long val;
	char buf[64];
	ssize_t ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	ret = __ftrace_set_clr_event(NULL, system, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}

extern char *__bad_type_size(void);

#undef FIELD
#define FIELD(type, name)						\
	sizeof(type) != sizeof(field.name) ? __bad_type_size() :	\
	#type, "common_" #name, offsetof(typeof(field), name),		\
		sizeof(field.name)

static int trace_write_header(struct trace_seq *s)
{
	struct trace_entry field;

	/* struct trace_entry */
	return trace_seq_printf(s,
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\n",
				FIELD(unsigned short, type),
				FIELD(unsigned char, flags),
				FIELD(unsigned char, preempt_count),
				FIELD(int, pid),
				FIELD(int, tgid));
}
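/*
 * Each line that trace_write_header() emits into an event's "format"
 * file has the shape (exact offsets and sizes depend on the build):
 *
 *	field:unsigned short common_type;	offset:0;	size:2;
 *
 * i.e. field:<type> common_<name>; offset:<n>; size:<n>; for every
 * member of struct trace_entry.
 */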

static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	char *buf;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	/* If any of the first writes fail, so will the show_format. */

	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");
	trace_write_header(s);

	r = call->show_format(s);
	if (!r) {
		/*
		 * ug! The format output is bigger than a PAGE!!
		 */
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    buf, strlen(buf));
		goto out;
	}

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
 out:
	kfree(s);
	return r;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
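/*
 * Example (the field names depend on the specific event's format, so
 * these are illustrative): a filter expression is applied by writing
 * it to the event's "filter" file, e.g.
 *
 *	echo 'common_pid == 1 && common_preempt_count > 0' > filter
 *
 * and cleared again by writing "0" to the same file.
 */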

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = tracing_open_generic,
	.read = system_enable_read,
	.write = system_enable_write,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
};

static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}

static LIST_HEAD(event_subsystems);

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			return system->entry;
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	entry = trace_create_file("enable", 0644, system->entry,
				  (void *)system->name,
				  &ftrace_system_enable_fops);

	return system->entry;
}

static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct dentry *entry;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	if (call->raw_init) {
		ret = call->raw_init();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
	}

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->regfunc)
		entry = trace_create_file("enable", 0644, call->dir, call,
					  enable);

	if (call->id)
		entry = trace_create_file("id", 0444, call->dir, call,
					  id);

	if (call->define_fields) {
		ret = call->define_fields();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
		entry = trace_create_file("filter", 0644, call->dir, call,
					  filter);
	}

	/* A trace may not want to export its format */
	if (!call->show_format)
		return 0;

	entry = trace_create_file("format", 0444, call->dir, call,
				  format);

	return 0;
}
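/*
 * The net effect of the two helpers above is a debugfs layout of the
 * form (under the tracing directory):
 *
 *	events/<system>/filter, events/<system>/enable
 *	events/<system>/<event>/{enable,id,filter,format}
 *
 * where "id", "filter" and "format" are only created when the event
 * supplies the corresponding callbacks.
 */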

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}

static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call, *start, *end;
	struct dentry *d_events;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	d_events = event_trace_events_dir();
	if (!d_events)
		return;

	for_each_event(call, start, end) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;

		/*
		 * This module has events, create file ops for this module
		 * if not already done.
		 */
		if (!file_ops) {
			file_ops = trace_create_file_ops(mod);
			if (!file_ops)
				return;
		}
		call->mod = mod;
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events,
				 &file_ops->id, &file_ops->enable,
				 &file_ops->filter, &file_ops->format);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			if (call->enabled) {
				call->enabled = 0;
				call->unregfunc();
			}
			if (call->event)
				unregister_ftrace_event(call->event);
			debugfs_remove_recursive(call->dir);
			list_del(&call->list);
			trace_destroy_fields(call);
			destroy_preds(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

static __init int event_trace_init(void)
{
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  NULL, &ftrace_system_enable_fops);

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events, &ftrace_event_id_fops,
				 &ftrace_enable_fops, &ftrace_event_filter_fops,
				 &ftrace_event_format_fops);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a regfunc */
		if (!call->regfunc)
			continue;

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->enabled) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		call->enabled = 1;
		call->regfunc();

		event_test_stuff();

		call->unregfunc();
		call->enabled = 0;

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* reset sysname */
	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(event, flags, pc);

 out:
	atomic_dec(&per_cpu(test_event_disable, cpu));
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{

	event_trace_self_tests();

	event_trace_self_test_with_function();

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif