tracing: Move fields from event to class structure
kernel/trace/trace_events.c
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);

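/*
 * With the move of per-event data into struct ftrace_event_class, the
 * field list normally hangs off the class and is shared by every event
 * of that class.  A class may instead provide a get_fields() callback
 * to locate its fields elsewhere (the syscall events, for instance,
 * keep theirs in the syscall metadata), so all lookups must go through
 * trace_get_fields().
 */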
struct list_head *
trace_get_fields(struct ftrace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}

int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct ftrace_event_field *field;
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	head = trace_get_fields(call);
	list_add(&field->link, head);

	return 0;

err:
	if (field)
		kfree(field->name);
	kfree(field);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(trace_define_field);
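
/*
 * Every event record starts with the common fields of struct
 * trace_entry.  This helper registers one of them, deriving offset,
 * size and signedness from the struct layout itself.
 */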
#define __common_field(type, item)					\
	ret = trace_define_field(call, #type, "common_" #item,		\
				 offsetof(typeof(ent), item),		\
				 sizeof(ent.item),			\
				 is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

static int trace_define_common_fields(struct ftrace_event_call *call)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);
	__common_field(int, lock_depth);

	return ret;
}

void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}

int trace_event_raw_init(struct ftrace_event_call *call)
{
	int id;

	id = register_ftrace_event(call->event);
	if (!id)
		return -ENODEV;
	call->id = id;

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);
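
/*
 * Enable or disable a single event.  An event class may supply a
 * reg() callback that handles (un)registration itself; otherwise the
 * class probe is attached directly to the tracepoint matching
 * call->name.
 */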
static int ftrace_event_enable_disable(struct ftrace_event_call *call,
				       int enable)
{
	int ret = 0;

	switch (enable) {
	case 0:
		if (call->enabled) {
			call->enabled = 0;
			tracing_stop_cmdline_record();
			if (call->class->reg)
				call->class->reg(call, TRACE_REG_UNREGISTER);
			else
				tracepoint_probe_unregister(call->name,
							    call->class->probe,
							    call);
		}
		break;
	case 1:
		if (!call->enabled) {
			tracing_start_cmdline_record();
			if (call->class->reg)
				ret = call->class->reg(call, TRACE_REG_REGISTER);
			else
				ret = tracepoint_probe_register(call->name,
								call->class->probe,
								call);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event "
					"%s\n", call->name);
				break;
			}
			call->enabled = 1;
		}
		break;
	}

	return ret;
}

static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		ftrace_event_enable_disable(call, 0);
	}
	mutex_unlock(&event_mutex);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
{
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->class ||
		    (!call->class->probe && !call->class->reg))
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}

static int ftrace_set_clr_event(char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(match, sub, event, set);
}
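
/*
 * Illustrative usage from user space (paths assume debugfs mounted at
 * /sys/kernel/debug):
 *
 *	echo sched:sched_switch > /sys/kernel/debug/tracing/set_event
 *	echo 'irq:*' >> /sys/kernel/debug/tracing/set_event
 *	echo '!sched:sched_switch' >> /sys/kernel/debug/tracing/set_event
 *
 * Opening the file with truncation (>) first clears every event (see
 * ftrace_event_seq_open()); a leading '!', handled in
 * ftrace_event_write(), clears only the matching events.
 */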

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	return __ftrace_set_clr_event(NULL, system, event, set);
}

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}

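/*
 * Two seq_file iterators walk ftrace_events under event_mutex:
 * t_next() (backing "available_events") lists every event that can be
 * enabled, while s_next() (backing "set_event") lists only those that
 * currently are enabled.  Both share t_show() for output.
 */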
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->class && (call->class->probe || call->class->reg))
			return call;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = t_next(m, call, &l);
		if (!call)
			break;
	}
	return call;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		if (call->enabled)
			return call;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = s_next(m, call, &l);
		if (!call)
			break;
	}
	return call;
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->enabled)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ret = ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	const char *system = filp->private_data;
	struct ftrace_event_call *call;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->class ||
		    (!call->class->probe && !call->class->reg))
			continue;

		if (system && strcmp(call->class->system, system) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!call->enabled);

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}
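
/*
 * The "set" bitmask above encodes the aggregate state: bit 1 is set if
 * any event is enabled, bit 0 if any is disabled, so set_to_char[] maps
 * 0/1/2/3 to '?' (no matching events), '0', '1' and 'X' (mixed).
 */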

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	const char *system = filp->private_data;
	unsigned long val;
	char buf[64];
	ssize_t ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	ret = __ftrace_set_clr_event(NULL, system, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}

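/*
 * event_format_read() below generates the per-event "format" file that
 * user-space tools parse.  A sketch of the layout (field values are
 * illustrative only):
 *
 *	name: <event>
 *	ID: <id>
 *	format:
 *		field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *		... four more common fields, then a blank line ...
 *		field:<type> <name>;	offset:<o>;	size:<s>;	signed:<0|1>;
 *
 *	print fmt: <format string>
 */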
static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct ftrace_event_field *field;
	struct list_head *head;
	struct trace_seq *s;
	int common_field_count = 5;
	char *buf;
	int r = 0;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");

	head = trace_get_fields(call);
	list_for_each_entry_reverse(field, head, link) {
		/*
		 * Smartly shows the array type (except dynamic array).
		 * Normal:
		 *	field:TYPE VAR
		 * If TYPE := TYPE[LEN], it is shown:
		 *	field:TYPE VAR[LEN]
		 */
		const char *array_descriptor = strchr(field->type, '[');

		if (!strncmp(field->type, "__data_loc", 10))
			array_descriptor = NULL;

		if (!array_descriptor) {
			r = trace_seq_printf(s, "\tfield:%s %s;\toffset:%u;"
					"\tsize:%u;\tsigned:%d;\n",
					field->type, field->name, field->offset,
					field->size, !!field->is_signed);
		} else {
			r = trace_seq_printf(s, "\tfield:%.*s %s%s;\toffset:%u;"
					"\tsize:%u;\tsigned:%d;\n",
					(int)(array_descriptor - field->type),
					field->type, field->name,
					array_descriptor, field->offset,
					field->size, !!field->is_signed);
		}

		if (--common_field_count == 0)
			r = trace_seq_printf(s, "\n");

		if (!r)
			break;
	}

	if (r)
		r = trace_seq_printf(s, "\nprint fmt: %s\n",
				call->print_fmt);

	if (!r) {
		/*
		 * ug! The format output is bigger than a PAGE!!
		 */
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    buf, strlen(buf));
		goto out;
	}

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
 out:
	kfree(s);
	return r;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
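
/*
 * Filters are boolean expressions over an event's fields; parsing and
 * predicate handling live in trace_events_filter.c.  Illustrative
 * usage only:
 *
 *	echo 'common_pid == 1 || prev_comm == "bash"' > \
 *		events/sched/sched_switch/filter
 *	echo 0 > events/sched/sched_switch/filter	# clear the filter
 */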

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = tracing_open_generic,
	.read = system_enable_read,
	.write = system_enable_write,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
};

static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}

static LIST_HEAD(event_subsystems);

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			system->nr_events++;
			return system->entry;
		}
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->nr_events = 1;
	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, system->entry,
			  (void *)system->name,
			  &ftrace_system_enable_fops);

	return system->entry;
}
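
/*
 * event_create_dir() below wires up the debugfs layout for one event:
 *
 *	events/<system>/<event>/enable
 *	events/<system>/<event>/id	(CONFIG_PERF_EVENTS only)
 *	events/<system>/<event>/filter
 *	events/<system>/<event>/format
 *
 * Since several events can share one class, the common and per-event
 * fields are defined only for the first such event that gets here.
 */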
static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct list_head *head;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->class->system, d_events);

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->class->probe || call->class->reg)
		trace_create_file("enable", 0644, call->dir, call,
				  enable);

#ifdef CONFIG_PERF_EVENTS
	if (call->id && (call->class->perf_probe || call->class->reg))
		trace_create_file("id", 0444, call->dir, call,
				  id);
#endif

	if (call->class->define_fields) {
		/*
		 * Other events may have the same class. Only update
		 * the fields if they are not already defined.
		 */
		head = trace_get_fields(call);
		if (list_empty(head)) {
			ret = trace_define_common_fields(call);
			if (!ret)
				ret = call->class->define_fields(call);
			if (ret < 0) {
				pr_warning("Could not initialize trace point"
					   " events/%s\n", call->name);
				return ret;
			}
		}
		trace_create_file("filter", 0644, call->dir, call,
				  filter);
	}

	trace_create_file("format", 0444, call->dir, call,
			  format);

	return 0;
}

static int __trace_add_event_call(struct ftrace_event_call *call)
{
	struct dentry *d_events;
	int ret;

	if (!call->name)
		return -EINVAL;

	if (call->raw_init) {
		ret = call->raw_init(call);
		if (ret < 0) {
			if (ret != -ENOSYS)
				pr_warning("Could not initialize trace "
					   "events/%s\n", call->name);
			return ret;
		}
	}

	d_events = event_trace_events_dir();
	if (!d_events)
		return -ENOENT;

	ret = event_create_dir(call, d_events, &ftrace_event_id_fops,
			       &ftrace_enable_fops, &ftrace_event_filter_fops,
			       &ftrace_event_format_fops);
	if (!ret)
		list_add(&call->list, &ftrace_events);

	return ret;
}

/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
	int ret;
	mutex_lock(&event_mutex);
	ret = __trace_add_event_call(call);
	mutex_unlock(&event_mutex);
	return ret;
}

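/*
 * A subsystem directory is reference counted by nr_events (taken in
 * event_subsystem_dir()); removing the last event of a subsystem, as
 * trace_remove_event_call() does via remove_subsystem_dir() below,
 * tears the directory and its filter down as well.
 */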
static void remove_subsystem_dir(const char *name)
{
	struct event_subsystem *system;

	if (strcmp(name, TRACE_SYSTEM) == 0)
		return;

	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			if (!--system->nr_events) {
				struct event_filter *filter = system->filter;

				debugfs_remove_recursive(system->entry);
				list_del(&system->list);
				if (filter) {
					kfree(filter->filter_string);
					kfree(filter);
				}
				kfree(system->name);
				kfree(system);
			}
			break;
		}
	}
}

/*
 * Must be called with both event_mutex and trace_event_mutex held.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
	ftrace_event_enable_disable(call, 0);
	if (call->event)
		__unregister_ftrace_event(call->event);
	debugfs_remove_recursive(call->dir);
	list_del(&call->list);
	trace_destroy_fields(call);
	destroy_preds(call);
	remove_subsystem_dir(call->class->system);
}

/* Remove an event_call */
void trace_remove_event_call(struct ftrace_event_call *call)
{
	mutex_lock(&event_mutex);
	down_write(&trace_event_mutex);
	__trace_remove_event_call(call);
	up_write(&trace_event_mutex);
	mutex_unlock(&event_mutex);
}

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}

static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call, *start, *end;
	struct dentry *d_events;
	int ret;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	d_events = event_trace_events_dir();
	if (!d_events)
		return;

	for_each_event(call, start, end) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		if (call->raw_init) {
			ret = call->raw_init(call);
			if (ret < 0) {
				if (ret != -ENOSYS)
					pr_warning("Could not initialize trace "
						   "point events/%s\n", call->name);
				continue;
			}
		}
		/*
		 * This module has events, create file ops for this module
		 * if not already done.
		 */
		if (!file_ops) {
			file_ops = trace_create_file_ops(mod);
			if (!file_ops)
				return;
		}
		call->mod = mod;
		ret = event_create_dir(call, d_events,
				       &file_ops->id, &file_ops->enable,
				       &file_ops->filter, &file_ops->format);
		if (!ret)
			list_add(&call->list, &ftrace_events);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			__trace_remove_event_call(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
	up_write(&trace_event_mutex);
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

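/*
 * Events can also be enabled from the kernel command line, e.g.
 * (illustrative):
 *
 *	trace_event=sched:sched_switch,irq:irq_handler_entry
 *
 * The comma-separated list is stashed here and replayed by
 * event_trace_init(); boot-time selftests are disabled so the
 * requested events are not disturbed.
 */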
static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = 1;
	tracing_selftest_disabled = 1;

	return 1;
}
__setup("trace_event=", setup_trace_event);

static __init int event_trace_init(void)
{
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;
	char *buf = bootup_event_buf;
	char *token;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  NULL, &ftrace_system_enable_fops);

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		if (call->raw_init) {
			ret = call->raw_init(call);
			if (ret < 0) {
				if (ret != -ENOSYS)
					pr_warning("Could not initialize trace "
						   "point events/%s\n", call->name);
				continue;
			}
		}
		ret = event_create_dir(call, d_events, &ftrace_event_id_fops,
				       &ftrace_enable_fops,
				       &ftrace_event_filter_fops,
				       &ftrace_event_format_fops);
		if (!ret)
			list_add(&call->list, &ftrace_events);
	}

	while (true) {
		token = strsep(&buf, ",");

		if (!token)
			break;
		if (!*token)
			continue;

		ret = ftrace_set_clr_event(token, 1);
		if (ret)
			pr_warning("Failed to enable trace event: %s\n", token);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a probe */
		if (!call->class || !call->class->probe)
			continue;

/*
 * Testing syscall events here is pretty useless, but
 * we still do it if configured. But this is time consuming.
 * What we really need is a user thread to perform the
 * syscalls as we test.
 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->class->system &&
		    strcmp(call->class->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->enabled) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(call, 1);
		event_test_stuff();
		ftrace_event_enable_disable(call, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* reset sysname */
	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

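/*
 * This callback runs on every traced function, so it must guard
 * against recursion: the per-cpu ftrace_test_event_disable counter
 * drops re-entrant invocations on the same CPU, and preemption is
 * held off for the duration via ftrace_preempt_disable().
 */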
static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(&buffer,
						  TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);

 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif