tracing: Extract duplicate ftrace_raw_init_event_foo()
kernel/trace/trace_events.c

/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);

int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, &call->fields);

	return 0;

err:
	if (field) {
		kfree(field->name);
		kfree(field->type);
	}
	kfree(field);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(trace_define_field);
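
/*
 * Illustrative sketch, not part of the original file: a define_fields()
 * callback typically calls trace_define_field() once per field of its
 * entry structure. The struct and field names below are hypothetical:
 *
 *	struct my_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;
 *	};
 *
 *	ret = trace_define_field(call, "unsigned long", "ip",
 *				 offsetof(struct my_entry, ip),
 *				 sizeof(unsigned long),
 *				 is_signed_type(unsigned long),
 *				 FILTER_OTHER);
 */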

#define __common_field(type, item)					\
	ret = trace_define_field(call, #type, "common_" #item,		\
				 offsetof(typeof(ent), item),		\
				 sizeof(ent.item),			\
				 is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

int trace_define_common_fields(struct ftrace_event_call *call)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);
	__common_field(int, lock_depth);

	return ret;
}
EXPORT_SYMBOL_GPL(trace_define_common_fields);

void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;

	list_for_each_entry_safe(field, next, &call->fields, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}

int trace_event_raw_init(struct ftrace_event_call *call)
{
	int id;

	id = register_ftrace_event(call->event);
	if (!id)
		return -ENODEV;
	call->id = id;
	INIT_LIST_HEAD(&call->fields);

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);
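
/*
 * Illustrative sketch, an assumption rather than code from this file:
 * with the duplicated ftrace_raw_init_event_foo() bodies extracted into
 * trace_event_raw_init() above, an event definition can point its init
 * callback straight at the common helper:
 *
 *	static struct ftrace_event_call my_event_call = {
 *		.name		= "my_event",
 *		.raw_init	= trace_event_raw_init,
 *		...
 *	};
 */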

static void ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{
	switch (enable) {
	case 0:
		if (call->enabled) {
			call->enabled = 0;
			tracing_stop_cmdline_record();
			call->unregfunc(call);
		}
		break;
	case 1:
		if (!call->enabled) {
			call->enabled = 1;
			tracing_start_cmdline_record();
			call->regfunc(call);
		}
		break;
	}
}

static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		ftrace_event_enable_disable(call, 0);
	}
	mutex_unlock(&event_mutex);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
{
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->regfunc)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)
			continue;

		if (sub && strcmp(sub, call->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}

static int ftrace_set_clr_event(char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(match, sub, event, set);
}
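
/*
 * Illustrative sketch of typical usage, not from this file: the buf
 * format parsed above is what user space writes to the debugfs
 * set_event file, e.g.:
 *
 *	echo 'sched:sched_switch' > /sys/kernel/debug/tracing/set_event
 *	echo 'sched:*' > /sys/kernel/debug/tracing/set_event
 *	echo '!sched:sched_switch' > /sys/kernel/debug/tracing/set_event
 *
 * The leading '!' is stripped in ftrace_event_write() below, which then
 * calls this function with set == 0 to disable the matched events.
 */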

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	return __ftrace_set_clr_event(NULL, system, event, set);
}
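
/*
 * Illustrative sketch of a hypothetical in-kernel caller, not from this
 * file; it assumes an event "sched_switch" exists in the "sched" system:
 *
 *	ret = trace_set_clr_event("sched", "sched_switch", 1);
 *	if (ret)
 *		pr_warning("could not enable sched_switch\n");
 */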

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->regfunc)
			return call;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = t_next(m, call, &l);
		if (!call)
			break;
	}
	return call;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		if (call->enabled)
			return call;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = s_next(m, call, &l);
		if (!call)
			break;
	}
	return call;
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->enabled)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	const char *system = filp->private_data;
	struct ftrace_event_call *call;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->regfunc)
			continue;

		if (system && strcmp(call->system, system) != 0)
			continue;

		/*
		 * We need to find out if all the events are set,
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!call->enabled);

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	const char *system = filp->private_data;
	unsigned long val;
	char buf[64];
	ssize_t ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	ret = __ftrace_set_clr_event(NULL, system, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}

extern char *__bad_type_size(void);

#undef FIELD
#define FIELD(type, name)						\
	sizeof(type) != sizeof(field.name) ? __bad_type_size() :	\
	#type, "common_" #name, offsetof(typeof(field), name),		\
		sizeof(field.name), is_signed_type(type)

static int trace_write_header(struct trace_seq *s)
{
	struct trace_entry field;

	/* struct trace_entry */
	return trace_seq_printf(s,
			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
			"\n",
			FIELD(unsigned short, type),
			FIELD(unsigned char, flags),
			FIELD(unsigned char, preempt_count),
			FIELD(int, pid),
			FIELD(int, lock_depth));
}

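/*
 * Illustrative sketch, an assumption about the output rather than text
 * from this file: with the usual struct trace_entry layout on a common
 * configuration, the header written above reads roughly:
 *
 *	field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *	field:unsigned char common_flags;	offset:2;	size:1;	signed:0;
 *	field:unsigned char common_preempt_count;	offset:3;	size:1;	signed:0;
 *	field:int common_pid;	offset:4;	size:4;	signed:1;
 *	field:int common_lock_depth;	offset:8;	size:4;	signed:1;
 */
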
static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	char *buf;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	/* If any of the first writes fail, so will the show_format. */

	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");
	trace_write_header(s);

	r = call->show_format(call, s);
	if (!r) {
		/*
		 * ug! The format output is bigger than a PAGE!!
		 */
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    buf, strlen(buf));
		goto out;
	}

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
 out:
	kfree(s);
	return r;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = tracing_open_generic,
	.read = system_enable_read,
	.write = system_enable_write,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
};

static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}

static LIST_HEAD(event_subsystems);

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			system->nr_events++;
			return system->entry;
		}
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->nr_events = 1;
	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, system->entry,
			  (void *)system->name,
			  &ftrace_system_enable_fops);

	return system->entry;
}

static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->regfunc)
		trace_create_file("enable", 0644, call->dir, call,
				  enable);

	if (call->id && call->profile_enable)
		trace_create_file("id", 0444, call->dir, call,
				  id);

	if (call->define_fields) {
		ret = call->define_fields(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
		trace_create_file("filter", 0644, call->dir, call,
				  filter);
	}

	/* A trace may not want to export its format */
	if (!call->show_format)
		return 0;

	trace_create_file("format", 0444, call->dir, call,
			  format);

	return 0;
}

static int __trace_add_event_call(struct ftrace_event_call *call)
{
	struct dentry *d_events;
	int ret;

	if (!call->name)
		return -EINVAL;

	if (call->raw_init) {
		ret = call->raw_init(call);
		if (ret < 0) {
			if (ret != -ENOSYS)
				pr_warning("Could not initialize trace "
					   "events/%s\n", call->name);
			return ret;
		}
	}

	d_events = event_trace_events_dir();
	if (!d_events)
		return -ENOENT;

	ret = event_create_dir(call, d_events, &ftrace_event_id_fops,
			       &ftrace_enable_fops, &ftrace_event_filter_fops,
			       &ftrace_event_format_fops);
	if (!ret)
		list_add(&call->list, &ftrace_events);

	return ret;
}

/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
	int ret;
	mutex_lock(&event_mutex);
	ret = __trace_add_event_call(call);
	mutex_unlock(&event_mutex);
	return ret;
}
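
/*
 * Illustrative sketch of a hypothetical dynamic-event user, not from
 * this file: a caller fills in an ftrace_event_call and registers it,
 * then removes it with trace_remove_event_call() below when done:
 *
 *	my_call.name = "my_dynamic_event";
 *	my_call.raw_init = trace_event_raw_init;
 *	ret = trace_add_event_call(&my_call);
 *	...
 *	trace_remove_event_call(&my_call);
 */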

static void remove_subsystem_dir(const char *name)
{
	struct event_subsystem *system;

	if (strcmp(name, TRACE_SYSTEM) == 0)
		return;

	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			if (!--system->nr_events) {
				struct event_filter *filter = system->filter;

				debugfs_remove_recursive(system->entry);
				list_del(&system->list);
				if (filter) {
					kfree(filter->filter_string);
					kfree(filter);
				}
				kfree(system->name);
				kfree(system);
			}
			break;
		}
	}
}

/*
 * Must be called with both event_mutex and trace_event_mutex held.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
	ftrace_event_enable_disable(call, 0);
	if (call->event)
		__unregister_ftrace_event(call->event);
	debugfs_remove_recursive(call->dir);
	list_del(&call->list);
	trace_destroy_fields(call);
	destroy_preds(call);
	remove_subsystem_dir(call->system);
}

/* Remove an event_call */
void trace_remove_event_call(struct ftrace_event_call *call)
{
	mutex_lock(&event_mutex);
	down_write(&trace_event_mutex);
	__trace_remove_event_call(call);
	up_write(&trace_event_mutex);
	mutex_unlock(&event_mutex);
}

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}

static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call, *start, *end;
	struct dentry *d_events;
	int ret;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	d_events = event_trace_events_dir();
	if (!d_events)
		return;

	for_each_event(call, start, end) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		if (call->raw_init) {
			ret = call->raw_init(call);
			if (ret < 0) {
				if (ret != -ENOSYS)
					pr_warning("Could not initialize trace "
						   "point events/%s\n", call->name);
				continue;
			}
		}
		/*
		 * This module has events, create file ops for this module
		 * if not already done.
		 */
		if (!file_ops) {
			file_ops = trace_create_file_ops(mod);
			if (!file_ops)
				return;
		}
		call->mod = mod;
		ret = event_create_dir(call, d_events,
				       &file_ops->id, &file_ops->enable,
				       &file_ops->filter, &file_ops->format);
		if (!ret)
			list_add(&call->list, &ftrace_events);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			__trace_remove_event_call(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
	up_write(&trace_event_mutex);
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = 1;
	tracing_selftest_disabled = 1;

	return 1;
}
__setup("trace_event=", setup_trace_event);
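
/*
 * Illustrative sketch of usage, not from this file: the trace_event=
 * boot parameter takes a comma-separated list in the same format that
 * ftrace_set_clr_event() parses, e.g. on the kernel command line:
 *
 *	trace_event=sched:sched_switch,irq:*
 */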

static __init int event_trace_init(void)
{
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;
	char *buf = bootup_event_buf;
	char *token;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  NULL, &ftrace_system_enable_fops);

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		if (call->raw_init) {
			ret = call->raw_init(call);
			if (ret < 0) {
				if (ret != -ENOSYS)
					pr_warning("Could not initialize trace "
						   "point events/%s\n", call->name);
				continue;
			}
		}
		ret = event_create_dir(call, d_events, &ftrace_event_id_fops,
				       &ftrace_enable_fops,
				       &ftrace_event_filter_fops,
				       &ftrace_event_format_fops);
		if (!ret)
			list_add(&call->list, &ftrace_events);
	}

	while (true) {
		token = strsep(&buf, ",");

		if (!token)
			break;
		if (!*token)
			continue;

		ret = ftrace_set_clr_event(token, 1);
		if (ret)
			pr_warning("Failed to enable trace event: %s\n", token);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a regfunc */
		if (!call->regfunc)
			continue;

/*
 * Testing syscall events here is pretty useless, but
 * we still do it if configured. But this is time consuming.
 * What we really need is a user thread to perform the
 * syscalls as we test.
 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->system &&
		    strcmp(call->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->enabled) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(call, 1);
		event_test_stuff();
		ftrace_event_enable_disable(call, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* reset sysname */
	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(&buffer,
						  TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);

 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif