tracing: Explain about unstable clock on resume with ring buffer warning
kernel/trace/trace_events.c
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

DEFINE_MUTEX(event_storage_mutex);
EXPORT_SYMBOL_GPL(event_storage_mutex);

char event_storage[EVENT_STORAGE_SIZE];
EXPORT_SYMBOL_GPL(event_storage);

LIST_HEAD(ftrace_events);
LIST_HEAD(ftrace_common_fields);

struct list_head *
trace_get_fields(struct ftrace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}

static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, head);

	return 0;

err:
	if (field)
		kfree(field->name);
	kfree(field);

	return -ENOMEM;
}

int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);

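/*
 * Usage sketch (illustrative, not part of the original file): how a
 * custom event might describe its payload with trace_define_field().
 * The struct and callback below are hypothetical; real callers are
 * usually the define_fields callbacks generated by the TRACE_EVENT()
 * macros.
 */
#if 0
struct my_event_entry {
	struct trace_entry	ent;
	unsigned long		addr;
	int			len;
};

static int my_event_define_fields(struct ftrace_event_call *call)
{
	int ret;

	ret = trace_define_field(call, "unsigned long", "addr",
				 offsetof(struct my_event_entry, addr),
				 sizeof(unsigned long), 0, FILTER_OTHER);
	if (ret)
		return ret;

	return trace_define_field(call, "int", "len",
				  offsetof(struct my_event_entry, len),
				  sizeof(int), 1, FILTER_OTHER);
}
#endif
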
#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

	return ret;
}
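
/*
 * Expansion sketch (for reference, not in the original file):
 * __common_field(int, pid) above expands to roughly the following,
 * registering the field on the shared ftrace_common_fields list:
 */
#if 0
	ret = __trace_define_field(&ftrace_common_fields, "int",
				   "common_pid",
				   offsetof(typeof(ent), pid),
				   sizeof(ent.pid),
				   is_signed_type(int), FILTER_OTHER);
	if (ret)
		return ret;
#endif
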
void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}

int trace_event_raw_init(struct ftrace_event_call *call)
{
	int id;

	id = register_ftrace_event(&call->event);
	if (!id)
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

int ftrace_event_reg(struct ftrace_event_call *call, enum trace_reg type)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->probe,
						 call);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->probe,
					    call);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->perf_probe,
					    call);
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ftrace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!(call->flags & TRACE_EVENT_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
		} else {
			tracing_stop_cmdline_record();
			call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
		}
	}
	mutex_unlock(&event_mutex);
}

static int ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{
	int ret = 0;

	switch (enable) {
	case 0:
		if (call->flags & TRACE_EVENT_FL_ENABLED) {
			call->flags &= ~TRACE_EVENT_FL_ENABLED;
			if (call->flags & TRACE_EVENT_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
			}
			call->class->reg(call, TRACE_REG_UNREGISTER);
		}
		break;
	case 1:
		if (!(call->flags & TRACE_EVENT_FL_ENABLED)) {
			if (trace_flags & TRACE_ITER_RECORD_CMD) {
				tracing_start_cmdline_record();
				call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
			}
			ret = call->class->reg(call, TRACE_REG_REGISTER);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event "
					"%s\n", call->name);
				break;
			}
			call->flags |= TRACE_EVENT_FL_ENABLED;
		}
		break;
	}

	return ret;
}

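/*
 * Enable-path sketch (annotation, not in the original file): enabling
 * an event optionally starts cmdline recording (so task comm names
 * resolve in the output), then registers the probe via
 * call->class->reg(call, TRACE_REG_REGISTER), and only marks the
 * event ENABLED once registration succeeded.
 */
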
static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		ftrace_event_enable_disable(call, 0);
	}
	mutex_unlock(&event_mutex);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
{
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}

static int ftrace_set_clr_event(char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(match, sub, event, set);
}

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	return __ftrace_set_clr_event(NULL, system, event, set);
}

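/*
 * Usage sketch (illustrative, not part of the original file): another
 * kernel subsystem could turn a single event on and back off with the
 * helper above, e.g.:
 */
#if 0
	trace_set_clr_event("sched", "sched_switch", 1);	/* enable */
	trace_set_clr_event("sched", "sched_switch", 0);	/* disable */
#endif
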
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}

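/*
 * Interface note (illustrative, not in the original file): the write
 * handler above backs the debugfs set_event file, and a leading '!'
 * clears instead of sets.  From userspace this typically looks like:
 *
 *   echo 'sched:sched_switch' > /sys/kernel/debug/tracing/set_event
 *   echo '!sched:*'           > /sys/kernel/debug/tracing/set_event
 */
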
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg)
			return call;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = t_next(m, call, &l);
		if (!call)
			break;
	}
	return call;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		if (call->flags & TRACE_EVENT_FL_ENABLED)
			return call;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = s_next(m, call, &l);
		if (!call)
			break;
	}
	return call;
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->flags & TRACE_EVENT_FL_ENABLED)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ret = ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	const char *system = filp->private_data;
	struct ftrace_event_call *call;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!(call->flags & TRACE_EVENT_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	const char *system = filp->private_data;
	unsigned long val;
	char buf[64];
	ssize_t ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	ret = __ftrace_set_clr_event(NULL, system, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}

enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};

static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		if (unlikely(list_empty(common_head)))
			return NULL;

		field = list_entry(common_head->prev,
				   struct ftrace_event_field, link);
		return field;

	case FORMAT_FIELD_SEPERATOR:
		if (unlikely(list_empty(head)))
			return NULL;

		field = list_entry(head->prev, struct ftrace_event_field, link);
		return field;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	field = v;
	if (field->link.prev == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (field->link.prev == head)
		return (void *)FORMAT_PRINTFMT;

	field = list_entry(field->link.prev, struct ftrace_event_field, link);

	return field;
}

static void *f_start(struct seq_file *m, loff_t *pos)
{
	loff_t l = 0;
	void *p;

	/* Start by showing the header */
	if (!*pos)
		return (void *)FORMAT_HEADER;

	p = (void *)FORMAT_HEADER;
	do {
		p = f_next(m, p, &l);
	} while (p && l < *pos);

	return p;
}

static int f_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", call->name);
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_printf(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	field = v;

	/*
	 * Smartly shows the array type (except dynamic array).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown:
	 *	field:TYPE VAR[LEN]
	 */
	array_descriptor = strchr(field->type, '[');

	if (!strncmp(field->type, "__data_loc", 10))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   array_descriptor, field->offset,
			   field->size, !!field->is_signed);

	return 0;
}

static void f_stop(struct seq_file *m, void *p)
{
}

static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};

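/*
 * Output sketch (illustrative, not part of the original file): reading
 * an event's "format" file walks the sequence above and prints roughly:
 *
 *   name: <event-name>
 *   ID: <event.type>
 *   format:
 *	field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *	(... remaining common fields, a separating blank line,
 *	 then the event's own fields ...)
 *
 *   print fmt: <call->print_fmt>
 */
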
static int trace_format_open(struct inode *inode, struct file *file)
{
	struct ftrace_event_call *call = inode->i_private;
	struct seq_file *m;
	int ret;

	ret = seq_open(file, &trace_format_seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = call;

	return 0;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->event.type);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

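/*
 * Filter usage sketch (illustrative, not in the original file):
 * apply_event_filter() parses a boolean expression over the fields
 * defined for the event, so from userspace one might write:
 *
 *   echo 'common_pid == 1' > \
 *	/sys/kernel/debug/tracing/events/sched/sched_switch/filter
 *   echo 0 > .../filter	<- writing "0" clears the filter
 */
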
static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = trace_format_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = tracing_open_generic,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
	.llseek = default_llseek,
};

static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}

static LIST_HEAD(event_subsystems);

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			system->nr_events++;
			return system->entry;
		}
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->nr_events = 1;
	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, system->entry,
			  (void *)system->name,
			  &ftrace_system_enable_fops);

	return system->entry;
}

static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct list_head *head;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->class->system, d_events);

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->class->reg)
		trace_create_file("enable", 0644, call->dir, call,
				  enable);

#ifdef CONFIG_PERF_EVENTS
	if (call->event.type && call->class->reg)
		trace_create_file("id", 0444, call->dir, call,
				  id);
#endif

	/*
	 * Other events may have the same class. Only update
	 * the fields if they are not already defined.
	 */
	head = trace_get_fields(call);
	if (list_empty(head)) {
		ret = call->class->define_fields(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
	}
	trace_create_file("filter", 0644, call->dir, call,
			  filter);

	trace_create_file("format", 0444, call->dir, call,
			  format);

	return 0;
}

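/*
 * Layout sketch (illustrative, not part of the original file): after
 * event_create_dir() has run for every event, the debugfs tree under
 * tracing/ looks like:
 *
 *   events/enable				<- all systems at once
 *   events/<system>/{enable,filter}		<- per subsystem
 *   events/<system>/<event>/enable
 *   events/<system>/<event>/id			(CONFIG_PERF_EVENTS only)
 *   events/<system>/<event>/filter
 *   events/<system>/<event>/format
 */
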
static int
__trace_add_event_call(struct ftrace_event_call *call, struct module *mod,
		       const struct file_operations *id,
		       const struct file_operations *enable,
		       const struct file_operations *filter,
		       const struct file_operations *format)
{
	struct dentry *d_events;
	int ret;

	/* The linker may leave blanks */
	if (!call->name)
		return -EINVAL;

	if (call->class->raw_init) {
		ret = call->class->raw_init(call);
		if (ret < 0) {
			if (ret != -ENOSYS)
				pr_warning("Could not initialize trace events/%s\n",
					   call->name);
			return ret;
		}
	}

	d_events = event_trace_events_dir();
	if (!d_events)
		return -ENOENT;

	ret = event_create_dir(call, d_events, id, enable, filter, format);
	if (!ret)
		list_add(&call->list, &ftrace_events);
	call->mod = mod;

	return ret;
}

/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
	int ret;
	mutex_lock(&event_mutex);
	ret = __trace_add_event_call(call, NULL, &ftrace_event_id_fops,
				     &ftrace_enable_fops,
				     &ftrace_event_filter_fops,
				     &ftrace_event_format_fops);
	mutex_unlock(&event_mutex);
	return ret;
}

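/*
 * Caller sketch (illustrative, not part of the original file): dynamic
 * event sources such as kprobe-based events build an ftrace_event_call
 * at runtime and hand it to trace_add_event_call();
 * trace_remove_event_call() below is the matching teardown.
 * my_build_dynamic_call() is a hypothetical helper.
 */
#if 0
	struct ftrace_event_call *call = my_build_dynamic_call();

	ret = trace_add_event_call(call);
	/* ... event is live; later: ... */
	trace_remove_event_call(call);
#endif
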
static void remove_subsystem_dir(const char *name)
{
	struct event_subsystem *system;

	if (strcmp(name, TRACE_SYSTEM) == 0)
		return;

	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			if (!--system->nr_events) {
				struct event_filter *filter = system->filter;

				debugfs_remove_recursive(system->entry);
				list_del(&system->list);
				if (filter) {
					kfree(filter->filter_string);
					kfree(filter);
				}
				kfree(system->name);
				kfree(system);
			}
			break;
		}
	}
}

/*
 * Must be called under locking both of event_mutex and trace_event_mutex.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
	ftrace_event_enable_disable(call, 0);
	if (call->event.funcs)
		__unregister_ftrace_event(&call->event);
	debugfs_remove_recursive(call->dir);
	list_del(&call->list);
	trace_destroy_fields(call);
	destroy_preds(call);
	remove_subsystem_dir(call->class->system);
}

/* Remove an event_call */
void trace_remove_event_call(struct ftrace_event_call *call)
{
	mutex_lock(&event_mutex);
	down_write(&trace_event_mutex);
	__trace_remove_event_call(call);
	up_write(&trace_event_mutex);
	mutex_unlock(&event_mutex);
}

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}

static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call **call, **start, **end;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	file_ops = trace_create_file_ops(mod);
	if (!file_ops)
		return;

	for_each_event(call, start, end) {
		__trace_add_event_call(*call, mod,
				       &file_ops->id, &file_ops->enable,
				       &file_ops->filter, &file_ops->format);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			__trace_remove_event_call(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
	up_write(&trace_event_mutex);
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call *__start_ftrace_events[];
extern struct ftrace_event_call *__stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = 1;
	tracing_selftest_disabled = 1;

	return 1;
}
__setup("trace_event=", setup_trace_event);

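/*
 * Boot-time usage sketch (illustrative, not part of the original
 * file): events can be enabled from the kernel command line before
 * debugfs exists, using the same names accepted by set_event, e.g.:
 *
 *   trace_event=sched:sched_switch,irq:*
 */
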
static __init int event_trace_init(void)
{
	struct ftrace_event_call **call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;
	char *buf = bootup_event_buf;
	char *token;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  NULL, &ftrace_system_enable_fops);

	if (trace_define_common_fields())
		pr_warning("tracing: Failed to allocate common fields");

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		__trace_add_event_call(*call, NULL, &ftrace_event_id_fops,
				       &ftrace_enable_fops,
				       &ftrace_event_filter_fops,
				       &ftrace_event_format_fops);
	}

	while (true) {
		token = strsep(&buf, ",");

		if (!token)
			break;
		if (!*token)
			continue;

		ret = ftrace_set_clr_event(token, 1);
		if (ret)
			pr_warning("Failed to enable trace event: %s\n", token);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a probe */
		if (!call->class || !call->class->probe)
			continue;

/*
 * Testing syscall events here is pretty useless, but
 * we still do it if configured. But this is time consuming.
 * What we really need is a user thread to perform the
 * syscalls as we test.
 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->class->system &&
		    strcmp(call->class->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->flags & TRACE_EVENT_FL_ENABLED) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(call, 1);
		event_test_stuff();
		ftrace_event_enable_disable(call, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* reset sysname */
	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	pc = preempt_count();
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(&buffer,
						  TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);

 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif