/* kernel/trace/trace_events.c */
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);

int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, &call->fields);

	return 0;

err:
	if (field) {
		kfree(field->name);
		kfree(field->type);
	}
	kfree(field);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(trace_define_field);

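/*
 * Illustrative sketch (not part of this file): a tracepoint's define_fields
 * callback typically describes its record layout through a series of
 * trace_define_field() calls.  The event structure and field names below
 * are made up for the example.
 *
 *	static int my_event_define_fields(struct ftrace_event_call *call)
 *	{
 *		struct my_event_entry field;
 *		int ret;
 *
 *		ret = trace_define_field(call, "unsigned long", "delay",
 *					 offsetof(typeof(field), delay),
 *					 sizeof(field.delay), 0, FILTER_OTHER);
 *		return ret;
 *	}
 */
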
#define __common_field(type, item)					\
	ret = trace_define_field(call, #type, "common_" #item,		\
				 offsetof(typeof(ent), item),		\
				 sizeof(ent.item),			\
				 is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

int trace_define_common_fields(struct ftrace_event_call *call)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);
	__common_field(int, lock_depth);

	return ret;
}
EXPORT_SYMBOL_GPL(trace_define_common_fields);

#ifdef CONFIG_MODULES

static void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;

	list_for_each_entry_safe(field, next, &call->fields, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}

#endif /* CONFIG_MODULES */

static void ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{
	switch (enable) {
	case 0:
		if (call->enabled) {
			call->enabled = 0;
			tracing_stop_cmdline_record();
			call->unregfunc(call->data);
		}
		break;
	case 1:
		if (!call->enabled) {
			call->enabled = 1;
			tracing_start_cmdline_record();
			call->regfunc(call->data);
		}
		break;
	}
}

static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		ftrace_event_enable_disable(call, 0);
	}
	mutex_unlock(&event_mutex);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
{
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->regfunc)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)
			continue;

		if (sub && strcmp(sub, call->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}

static int ftrace_set_clr_event(char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(match, sub, event, set);
}

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	return __ftrace_set_clr_event(NULL, system, event, set);
}

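/*
 * Usage sketch (not part of this file): another kernel subsystem could
 * switch an event on and off around a region of interest.  The event
 * names below are only examples.
 *
 *	if (trace_set_clr_event("sched", "sched_wakeup", 1))
 *		pr_warning("could not enable sched_wakeup event\n");
 *	...
 *	trace_set_clr_event("sched", "sched_wakeup", 0);
 */
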
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}

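/*
 * Usage sketch (not part of this file): the write handler above is reached
 * through the debugfs "set_event" file, e.g.
 *
 *	echo 'sched:sched_wakeup' > /sys/kernel/debug/tracing/set_event
 *	echo '!sched:sched_wakeup' >> /sys/kernel/debug/tracing/set_event
 *
 * A leading '!' disables the named event(s); the <subsystem>:<event>
 * syntax is the one parsed by ftrace_set_clr_event() above.
 */
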
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->regfunc)
			return call;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = t_next(m, call, &l);
		if (!call)
			break;
	}
	return call;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		if (call->enabled)
			return call;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = s_next(m, call, &l);
		if (!call)
			break;
	}
	return call;
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->enabled)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	const char *system = filp->private_data;
	struct ftrace_event_call *call;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->regfunc)
			continue;

		if (system && strcmp(call->system, system) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!call->enabled);

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	const char *system = filp->private_data;
	unsigned long val;
	char buf[64];
	ssize_t ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	ret = __ftrace_set_clr_event(NULL, system, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}

extern char *__bad_type_size(void);

#undef FIELD
#define FIELD(type, name)						\
	sizeof(type) != sizeof(field.name) ? __bad_type_size() :	\
	#type, "common_" #name, offsetof(typeof(field), name),		\
		sizeof(field.name)

static int trace_write_header(struct trace_seq *s)
{
	struct trace_entry field;

	/* struct trace_entry */
	return trace_seq_printf(s,
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\n",
				FIELD(unsigned short, type),
				FIELD(unsigned char, flags),
				FIELD(unsigned char, preempt_count),
				FIELD(int, pid),
				FIELD(int, lock_depth));
}

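/*
 * Illustrative sketch (not part of this file): with the usual layout of
 * struct trace_entry, the header written above shows up at the top of each
 * event's "format" file roughly as:
 *
 *	field:unsigned short common_type;	offset:0;	size:2;
 *	field:unsigned char common_flags;	offset:2;	size:1;
 *	field:unsigned char common_preempt_count;	offset:3;	size:1;
 *	field:int common_pid;	offset:4;	size:4;
 *	field:int common_lock_depth;	offset:8;	size:4;
 *
 * The exact offsets depend on how the compiler lays out struct trace_entry.
 */
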
static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	char *buf;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	/* If any of the first writes fail, so will the show_format. */

	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");
	trace_write_header(s);

	r = call->show_format(call, s);
	if (!r) {
		/*
		 * ug! The format output is bigger than a PAGE!!
		 */
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    buf, strlen(buf));
		goto out;
	}

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
 out:
	kfree(s);
	return r;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = tracing_open_generic,
	.read = system_enable_read,
	.write = system_enable_write,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
};

static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}

static LIST_HEAD(event_subsystems);

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			system->nr_events++;
			return system->entry;
		}
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->nr_events = 1;
	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, system->entry,
			  (void *)system->name,
			  &ftrace_system_enable_fops);

	return system->entry;
}

static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->regfunc)
		trace_create_file("enable", 0644, call->dir, call,
				  enable);

	if (call->id && call->profile_enable)
		trace_create_file("id", 0444, call->dir, call,
				  id);

	if (call->define_fields) {
		ret = call->define_fields(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
		trace_create_file("filter", 0644, call->dir, call,
				  filter);
	}

	/* A trace may not want to export its format */
	if (!call->show_format)
		return 0;

	trace_create_file("format", 0444, call->dir, call,
			  format);

	return 0;
}

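/*
 * Illustrative sketch (not part of this file): for an event defined with
 * TRACE_SYSTEM "sched" and named "sched_wakeup", the code above produces a
 * debugfs layout roughly like:
 *
 *	tracing/events/enable
 *	tracing/events/sched/enable
 *	tracing/events/sched/filter
 *	tracing/events/sched/sched_wakeup/{enable,id,filter,format}
 *
 * Which files exist for a given event depends on its regfunc, id,
 * profile_enable, define_fields and show_format callbacks.
 */
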
#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head list;
	struct module *mod;
	struct file_operations id;
	struct file_operations enable;
	struct file_operations format;
	struct file_operations filter;
};

static void remove_subsystem_dir(const char *name)
{
	struct event_subsystem *system;

	if (strcmp(name, TRACE_SYSTEM) == 0)
		return;

	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			if (!--system->nr_events) {
				struct event_filter *filter = system->filter;

				debugfs_remove_recursive(system->entry);
				list_del(&system->list);
				if (filter) {
					kfree(filter->filter_string);
					kfree(filter);
				}
				kfree(system->name);
				kfree(system);
			}
			break;
		}
	}
}

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}

static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call, *start, *end;
	struct dentry *d_events;
	int ret;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	d_events = event_trace_events_dir();
	if (!d_events)
		return;

	for_each_event(call, start, end) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		if (call->raw_init) {
			ret = call->raw_init();
			if (ret < 0) {
				if (ret != -ENOSYS)
					pr_warning("Could not initialize trace "
						   "point events/%s\n", call->name);
				continue;
			}
		}
		/*
		 * This module has events, create file ops for this module
		 * if not already done.
		 */
		if (!file_ops) {
			file_ops = trace_create_file_ops(mod);
			if (!file_ops)
				return;
		}
		call->mod = mod;
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events,
				 &file_ops->id, &file_ops->enable,
				 &file_ops->filter, &file_ops->format);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			ftrace_event_enable_disable(call, 0);
			if (call->event)
				__unregister_ftrace_event(call->event);
			debugfs_remove_recursive(call->dir);
			list_del(&call->list);
			trace_destroy_fields(call);
			destroy_preds(call);
			remove_subsystem_dir(call->system);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
	up_write(&trace_event_mutex);
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = 1;
	tracing_selftest_disabled = 1;

	return 1;
}
__setup("trace_event=", setup_trace_event);

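/*
 * Usage sketch (not part of this file): events can also be enabled from the
 * kernel command line with a comma-separated list, e.g.
 *
 *	trace_event=sched:sched_wakeup,irq:irq_handler_entry
 *
 * The list is stored in bootup_event_buf above and each token is passed to
 * ftrace_set_clr_event() by event_trace_init() below.
 */
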
static __init int event_trace_init(void)
{
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;
	char *buf = bootup_event_buf;
	char *token;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  NULL, &ftrace_system_enable_fops);

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		if (call->raw_init) {
			ret = call->raw_init();
			if (ret < 0) {
				if (ret != -ENOSYS)
					pr_warning("Could not initialize trace "
						   "point events/%s\n", call->name);
				continue;
			}
		}
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events, &ftrace_event_id_fops,
				 &ftrace_enable_fops, &ftrace_event_filter_fops,
				 &ftrace_event_format_fops);
	}

	while (true) {
		token = strsep(&buf, ",");

		if (!token)
			break;
		if (!*token)
			continue;

		ret = ftrace_set_clr_event(token, 1);
		if (ret)
			pr_warning("Failed to enable trace event: %s\n", token);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a regfunc */
		if (!call->regfunc)
			continue;

/*
 * Testing syscall events here is pretty useless, but
 * we still do it if configured. But this is time consuming.
 * What we really need is a user thread to perform the
 * syscalls as we test.
 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->system &&
		    strcmp(call->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->enabled) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(call, 1);
		event_test_stuff();
		ftrace_event_enable_disable(call, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* reset sysname */
	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(&buffer,
						  TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);

 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif