tracing: Use kmem_cache_alloc instead of kmalloc in trace_events.c
kernel/trace/trace_events.c
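The change named in the title replaces kmalloc()/kfree() of the fixed-size
ftrace_event_field and ftrace_event_file structures with dedicated slab
caches. A minimal sketch of that pattern, using a hypothetical struct foo
rather than the kernel's own types:

	/* hypothetical example type; the file below does the same for its
	 * ftrace_event_field and ftrace_event_file structures */
	struct foo {
		int a;
		char *name;
	};

	static struct kmem_cache *foo_cachep;

	static int __init foo_setup(void)
	{
		/* object-sized cache; SLAB_PANIC stops the boot on failure */
		foo_cachep = KMEM_CACHE(foo, SLAB_PANIC);
		return 0;
	}

	/* ... at allocation and free sites ... */
	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL | __GFP_ZERO);
	/* ... */
	kmem_cache_free(foo_cachep, f);

A dedicated cache packs equal-sized objects tightly instead of rounding each
allocation up to the next kmalloc bucket size, which matters when many such
objects are allocated.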
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

DEFINE_MUTEX(event_storage_mutex);
EXPORT_SYMBOL_GPL(event_storage_mutex);

char event_storage[EVENT_STORAGE_SIZE];
EXPORT_SYMBOL_GPL(event_storage);

LIST_HEAD(ftrace_events);
LIST_HEAD(ftrace_common_fields);

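/*
 * ftrace_event_field and ftrace_event_file objects are small, fixed in
 * size and allocated in large numbers, so they come from dedicated slab
 * caches (created in event_trace_memsetup() below) instead of the
 * generic kmalloc buckets.
 */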
#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;

/* Double loops, do not use break, only gotos work */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct ftrace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()		\
	}

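/*
 * Note that a break inside the inner list_for_each_entry() only ends
 * that trace_array's walk; the loop over ftrace_trace_arrays continues.
 * Several users below rely on exactly that to skip to the next
 * trace_array once a matching file has been handled.
 */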
struct list_head *
trace_get_fields(struct ftrace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}

static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct ftrace_event_field *field;

	field = kmem_cache_alloc(field_cachep, GFP_TRACE);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, head);

	return 0;

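	/*
	 * Unlike kfree(), kmem_cache_free() must not be handed a NULL
	 * pointer, so only free the object if the allocation above
	 * actually succeeded.
	 */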
 err:
	if (field) {
		kfree(field->name);
		kmem_cache_free(field_cachep, field);
	}

	return -ENOMEM;
}

int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);

#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

	return ret;
}

void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kmem_cache_free(field_cachep, field);
	}
}

int trace_event_raw_init(struct ftrace_event_call *call)
{
	int id;

	id = register_ftrace_event(&call->event);
	if (!id)
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

int ftrace_event_reg(struct ftrace_event_call *call,
		     enum trace_reg type, void *data)
{
	struct ftrace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->probe,
						 file);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->probe,
					    file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->perf_probe,
					    call);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ftrace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
	struct ftrace_event_file *file;
	struct trace_array *tr;

	mutex_lock(&event_mutex);
	do_for_each_event_file(tr, file) {

		if (!(file->flags & FTRACE_EVENT_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			file->flags |= FTRACE_EVENT_FL_RECORDED_CMD;
		} else {
			tracing_stop_cmdline_record();
			file->flags &= ~FTRACE_EVENT_FL_RECORDED_CMD;
		}
	} while_for_each_event_file();
	mutex_unlock(&event_mutex);
}

static int ftrace_event_enable_disable(struct ftrace_event_file *file,
				       int enable)
{
	struct ftrace_event_call *call = file->event_call;
	int ret = 0;

	switch (enable) {
	case 0:
		if (file->flags & FTRACE_EVENT_FL_ENABLED) {
			file->flags &= ~FTRACE_EVENT_FL_ENABLED;
			if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				file->flags &= ~FTRACE_EVENT_FL_RECORDED_CMD;
			}
			call->class->reg(call, TRACE_REG_UNREGISTER, file);
		}
		break;
	case 1:
		if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
			if (trace_flags & TRACE_ITER_RECORD_CMD) {
				tracing_start_cmdline_record();
				file->flags |= FTRACE_EVENT_FL_RECORDED_CMD;
			}
			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event "
					"%s\n", call->name);
				break;
			}
			file->flags |= FTRACE_EVENT_FL_ENABLED;
		}
		break;
	}

	return ret;
}

static void ftrace_clear_events(struct trace_array *tr)
{
	struct ftrace_event_file *file;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		ftrace_event_enable_disable(file, 0);
	}
	mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system->ref_count == 0);
	if (--system->ref_count)
		return;

	list_del(&system->list);

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	kfree(system->name);
	kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system->ref_count == 0);
	system->ref_count++;
}

static void __get_system_dir(struct ftrace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	dir->ref_count++;
	__get_system(dir->subsystem);
}

static void __put_system_dir(struct ftrace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	/* If the subsystem is about to be freed, the dir must be too */
	WARN_ON_ONCE(dir->subsystem->ref_count == 1 && dir->ref_count != 1);

	__put_system(dir->subsystem);
	if (!--dir->ref_count)
		kfree(dir);
}

static void put_system(struct ftrace_subsystem_dir *dir)
{
	mutex_lock(&event_mutex);
	__put_system_dir(dir);
	mutex_unlock(&event_mutex);
}

/*
 * __ftrace_set_clr_event(tr, NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
				  const char *sub, const char *event, int set)
{
	struct ftrace_event_file *file;
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;

		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(file, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}

static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(tr, match, sub, event, set);
}

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
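 *
 * For example, trace_set_clr_event("sched", "sched_switch", 1)
 * enables recording of the sched:sched_switch tracepoint.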
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	struct trace_array *tr = top_trace_array();

	return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	struct seq_file *m = file->private_data;
	struct trace_array *tr = m->private;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_file *file = v;
	struct ftrace_event_call *call;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		call = file->event_call;
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg)
			return file;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct ftrace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = t_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_file *file = v;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		if (file->flags & FTRACE_EVENT_FL_ENABLED)
			return file;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct ftrace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = s_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_file *file = v;
	struct ftrace_event_call *call = file->event_call;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_file *file = filp->private_data;
	char *buf;

	if (file->flags & FTRACE_EVENT_FL_ENABLED)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_file *file = filp->private_data;
	unsigned long val;
	int ret;

	if (!file)
		return -EINVAL;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ret = ftrace_event_enable_disable(file, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
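	/*
	 * "set" is a two-bit state: bit 0 says some matching event is
	 * disabled, bit 1 says some is enabled.  Hence '?' = no events
	 * matched, '0' = all disabled, '1' = all enabled, 'X' = mixed.
	 */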
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct ftrace_event_call *call;
	struct ftrace_event_file *file;
	struct trace_array *tr = dir->tr;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		call = file->event_call;
		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	const char *name = NULL;
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/*
	 * Opening of "enable" adds a ref count to system,
	 * so the name is safe to use.
	 */
	if (system)
		name = system->name;

	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}

enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};

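/*
 * The "format" file is iterated in this order: the header (name, ID,
 * "format:"), the common fields (walked from the tail of
 * ftrace_common_fields so they print in definition order), a blank
 * separator line, the event's own fields, and finally the print fmt.
 */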
static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		if (unlikely(list_empty(common_head)))
			return NULL;

		field = list_entry(common_head->prev,
				   struct ftrace_event_field, link);
		return field;

	case FORMAT_FIELD_SEPERATOR:
		if (unlikely(list_empty(head)))
			return NULL;

		field = list_entry(head->prev, struct ftrace_event_field, link);
		return field;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	field = v;
	if (field->link.prev == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (field->link.prev == head)
		return (void *)FORMAT_PRINTFMT;

	field = list_entry(field->link.prev, struct ftrace_event_field, link);

	return field;
}

static void *f_start(struct seq_file *m, loff_t *pos)
{
	loff_t l = 0;
	void *p;

	/* Start by showing the header */
	if (!*pos)
		return (void *)FORMAT_HEADER;

	p = (void *)FORMAT_HEADER;
	do {
		p = f_next(m, p, &l);
	} while (p && l < *pos);

	return p;
}

static int f_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", call->name);
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_printf(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	field = v;

	/*
	 * Smartly shows the array type (except dynamic array).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown:
	 *	field:TYPE VAR[LEN]
	 */
	array_descriptor = strchr(field->type, '[');

	if (!strncmp(field->type, "__data_loc", 10))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   array_descriptor, field->offset,
			   field->size, !!field->is_signed);

	return 0;
}

static void f_stop(struct seq_file *m, void *p)
{
}

static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
	struct ftrace_event_call *call = inode->i_private;
	struct seq_file *m;
	int ret;

	ret = seq_open(file, &trace_format_seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = call;

	return 0;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->event.type);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

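/*
 * Writing a boolean expression such as "common_pid != 0" installs it as
 * this event's filter; writing the single token "0" clears an existing
 * filter.
 */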
static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static LIST_HEAD(event_subsystems);

static int subsystem_open(struct inode *inode, struct file *filp)
{
	struct event_subsystem *system = NULL;
	struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */
	struct trace_array *tr;
	int ret;

	/* Make sure the system still exists */
	mutex_lock(&event_mutex);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		list_for_each_entry(dir, &tr->systems, list) {
			if (dir == inode->i_private) {
				/* Don't open systems with no events */
				if (dir->nr_events) {
					__get_system_dir(dir);
					system = dir->subsystem;
				}
				goto exit_loop;
			}
		}
	}
 exit_loop:
	mutex_unlock(&event_mutex);

	if (!system)
		return -ENODEV;

	/* Some versions of gcc think dir can be uninitialized here */
	WARN_ON(!dir);

	ret = tracing_open_generic(inode, filp);
	if (ret < 0)
		put_system(dir);

	return ret;
}

static int system_tr_open(struct inode *inode, struct file *filp)
{
	struct ftrace_subsystem_dir *dir;
	struct trace_array *tr = inode->i_private;
	int ret;

	/* Make a temporary dir that has no system but points to tr */
	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		return -ENOMEM;

	dir->tr = tr;

	ret = tracing_open_generic(inode, filp);
	if (ret < 0)
		kfree(dir);

	filp->private_data = dir;

	return ret;
}

static int subsystem_release(struct inode *inode, struct file *file)
{
	struct ftrace_subsystem_dir *dir = file->private_data;

	/*
	 * If dir->subsystem is NULL, then this is a temporary
	 * descriptor that was made for a trace_array to enable
	 * all subsystems.
	 */
	if (dir->subsystem)
		put_system(dir);
	else
		kfree(dir);

	return 0;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(dir, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static int ftrace_event_avail_open(struct inode *inode, struct file *file);
static int ftrace_event_set_open(struct inode *inode, struct file *file);

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_set_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = trace_format_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = subsystem_open,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = subsystem_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_tr_enable_fops = {
	.open = system_tr_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
	.llseek = default_llseek,
};

static int
ftrace_event_open(struct inode *inode, struct file *file,
		  const struct seq_operations *seq_ops)
{
	struct seq_file *m;
	int ret;

	ret = seq_open(file, seq_ops);
	if (ret < 0)
		return ret;
	m = file->private_data;
	/* copy tr over to seq ops */
	m->private = inode->i_private;

	return ret;
}

static int
ftrace_event_avail_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_event_seq_ops;

	return ftrace_event_open(inode, file, seq_ops);
}

static int
ftrace_event_set_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_set_event_seq_ops;
	struct trace_array *tr = inode->i_private;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events(tr);

	return ftrace_event_open(inode, file, seq_ops);
}

static struct event_subsystem *
create_new_subsystem(const char *name)
{
	struct event_subsystem *system;

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system)
		return NULL;

	system->ref_count = 1;
	system->name = kstrdup(name, GFP_KERNEL);

	if (!system->name)
		goto out_free;

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter)
		goto out_free;

	list_add(&system->list, &event_subsystems);

	return system;

 out_free:
	kfree(system->name);
	kfree(system);
	return NULL;
}

static struct dentry *
event_subsystem_dir(struct trace_array *tr, const char *name,
		    struct ftrace_event_file *file, struct dentry *parent)
{
	struct ftrace_subsystem_dir *dir;
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(dir, &tr->systems, list) {
		system = dir->subsystem;
		if (strcmp(system->name, name) == 0) {
			dir->nr_events++;
			file->system = dir;
			return dir->entry;
		}
	}

	/* Now see if the system itself exists. */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			break;
	}
	/* Reset system variable when not found */
	if (&system->list == &event_subsystems)
		system = NULL;

	dir = kmalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		goto out_fail;

	if (!system) {
		system = create_new_subsystem(name);
		if (!system)
			goto out_free;
	} else
		__get_system(system);

	dir->entry = debugfs_create_dir(name, parent);
	if (!dir->entry) {
		pr_warning("Failed to create system directory %s\n", name);
		__put_system(system);
		goto out_free;
	}

	dir->tr = tr;
	dir->ref_count = 1;
	dir->nr_events = 1;
	dir->subsystem = system;
	file->system = dir;

	entry = debugfs_create_file("filter", 0644, dir->entry, dir,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs '%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, dir->entry, dir,
			  &ftrace_system_enable_fops);

	list_add(&dir->list, &tr->systems);

	return dir->entry;

 out_free:
	kfree(dir);
 out_fail:
	/* Only print this message if failed on memory allocation */
	if (!dir || !system)
		pr_warning("No memory to create event subsystem %s\n",
			   name);
	return NULL;
}

static int
event_create_dir(struct dentry *parent,
		 struct ftrace_event_file *file,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct ftrace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	struct list_head *head;
	struct dentry *d_events;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
		d_events = event_subsystem_dir(tr, call->class->system, file, parent);
		if (!d_events)
			return -ENOMEM;
	} else
		d_events = parent;

	file->dir = debugfs_create_dir(call->name, d_events);
	if (!file->dir) {
		pr_warning("Could not create debugfs '%s' directory\n",
			   call->name);
		return -1;
	}

	if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
		trace_create_file("enable", 0644, file->dir, file,
				  enable);

#ifdef CONFIG_PERF_EVENTS
	if (call->event.type && call->class->reg)
		trace_create_file("id", 0444, file->dir, call,
				  id);
#endif

	/*
	 * Other events may have the same class. Only update
	 * the fields if they are not already defined.
	 */
	head = trace_get_fields(call);
	if (list_empty(head)) {
		ret = call->class->define_fields(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return -1;
		}
	}
	trace_create_file("filter", 0644, file->dir, call,
			  filter);

	trace_create_file("format", 0444, file->dir, call,
			  format);

	return 0;
}

static void remove_subsystem(struct ftrace_subsystem_dir *dir)
{
	if (!dir)
		return;

	if (!--dir->nr_events) {
		debugfs_remove_recursive(dir->entry);
		list_del(&dir->list);
		__put_system_dir(dir);
	}
}

static void remove_event_from_tracers(struct ftrace_event_call *call)
{
	struct ftrace_event_file *file;
	struct trace_array *tr;

	do_for_each_event_file_safe(tr, file) {

		if (file->event_call != call)
			continue;

		list_del(&file->list);
		debugfs_remove_recursive(file->dir);
		remove_subsystem(file->system);
		kmem_cache_free(file_cachep, file);

		/*
		 * The do_for_each_event_file_safe() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();
}

static void event_remove(struct ftrace_event_call *call)
{
	struct trace_array *tr;
	struct ftrace_event_file *file;

	do_for_each_event_file(tr, file) {
		if (file->event_call != call)
			continue;
		ftrace_event_enable_disable(file, 0);
		/*
		 * The do_for_each_event_file() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();

	if (call->event.funcs)
		__unregister_ftrace_event(&call->event);
	remove_event_from_tracers(call);
	list_del(&call->list);
}

static int event_init(struct ftrace_event_call *call)
{
	int ret = 0;

	if (WARN_ON(!call->name))
		return -EINVAL;

	if (call->class->raw_init) {
		ret = call->class->raw_init(call);
		if (ret < 0 && ret != -ENOSYS)
			pr_warn("Could not initialize trace events/%s\n",
				call->name);
	}

	return ret;
}

static int
__register_event(struct ftrace_event_call *call, struct module *mod)
{
	int ret;

	ret = event_init(call);
	if (ret < 0)
		return ret;

	list_add(&call->list, &ftrace_events);
	call->mod = mod;

	return 0;
}

/* Add an event to a trace directory */
static int
__trace_add_new_event(struct ftrace_event_call *call,
		      struct trace_array *tr,
		      const struct file_operations *id,
		      const struct file_operations *enable,
		      const struct file_operations *filter,
		      const struct file_operations *format)
{
	struct ftrace_event_file *file;

	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
	if (!file)
		return -ENOMEM;

	file->event_call = call;
	file->tr = tr;
	list_add(&file->list, &tr->events);

	return event_create_dir(tr->event_dir, file, id, enable, filter, format);
}

/*
 * Just create a descriptor for early init. A descriptor is required
 * for enabling events at boot. We want to enable events before
 * the filesystem is initialized.
 */
static __init int
__trace_early_add_new_event(struct ftrace_event_call *call,
			    struct trace_array *tr)
{
	struct ftrace_event_file *file;

	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
	if (!file)
		return -ENOMEM;

	file->event_call = call;
	file->tr = tr;
	list_add(&file->list, &tr->events);

	return 0;
}

struct ftrace_module_file_ops;
static void __add_event_to_tracers(struct ftrace_event_call *call,
				   struct ftrace_module_file_ops *file_ops);

/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
	int ret;
	mutex_lock(&event_mutex);

	ret = __register_event(call, NULL);
	if (ret >= 0)
		__add_event_to_tracers(call, NULL);

	mutex_unlock(&event_mutex);
	return ret;
}

/*
 * Must be called with both event_mutex and trace_event_mutex held.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
	event_remove(call);
	trace_destroy_fields(call);
	destroy_preds(call);
}

/* Remove an event_call */
void trace_remove_event_call(struct ftrace_event_call *call)
{
	mutex_lock(&event_mutex);
	down_write(&trace_event_mutex);
	__trace_remove_event_call(call);
	up_write(&trace_event_mutex);
	mutex_unlock(&event_mutex);
}

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};

static struct ftrace_module_file_ops *find_ftrace_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			return file_ops;
	}
	return NULL;
}

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}

static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call **call, **start, **end;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	file_ops = trace_create_file_ops(mod);
	if (!file_ops)
		return;

	for_each_event(call, start, end) {
		__register_event(*call, mod);
		__add_event_to_tracers(*call, file_ops);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			__trace_remove_event_call(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
	up_write(&trace_event_mutex);
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static struct ftrace_module_file_ops *find_ftrace_file_ops(struct module *mod)
{
	return NULL;
}
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

/* Create a new event directory structure for a trace directory. */
static void
__trace_add_event_dirs(struct trace_array *tr)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call;
	int ret;

	list_for_each_entry(call, &ftrace_events, list) {
		if (call->mod) {
			/*
			 * Directories for events by modules need to
			 * keep module ref counts when opened (as we don't
			 * want the module to disappear when reading one
			 * of these files). The file_ops keep account of
			 * the module ref count.
			 *
			 * As event_calls are added in groups by module,
			 * when we find one file_ops, we don't need to search for
			 * each call in that module, as the rest should be the
			 * same. Only search for a new one if the last one did
			 * not match.
			 */
			if (!file_ops || call->mod != file_ops->mod)
				file_ops = find_ftrace_file_ops(call->mod);
			if (!file_ops)
				continue; /* Warn? */
			ret = __trace_add_new_event(call, tr,
					&file_ops->id, &file_ops->enable,
					&file_ops->filter, &file_ops->format);
			if (ret < 0)
				pr_warning("Could not create directory for event %s\n",
					   call->name);
			continue;
		}
		ret = __trace_add_new_event(call, tr,
					    &ftrace_event_id_fops,
					    &ftrace_enable_fops,
					    &ftrace_event_filter_fops,
					    &ftrace_event_format_fops);
		if (ret < 0)
			pr_warning("Could not create directory for event %s\n",
				   call->name);
	}
}

/*
 * The top level array has already had its ftrace_event_file
 * descriptors created in order to allow for early events to
 * be recorded. This function is called after the debugfs has been
 * initialized, and we now have to create the files associated
 * to the events.
 */
static __init void
__trace_early_add_event_dirs(struct trace_array *tr)
{
	struct ftrace_event_file *file;
	int ret;

	list_for_each_entry(file, &tr->events, list) {
		ret = event_create_dir(tr->event_dir, file,
				       &ftrace_event_id_fops,
				       &ftrace_enable_fops,
				       &ftrace_event_filter_fops,
				       &ftrace_event_format_fops);
		if (ret < 0)
			pr_warning("Could not create directory for event %s\n",
				   file->event_call->name);
	}
}

/*
 * For early boot up, the top trace array needs to have a list of
 * events that can be enabled. This must be done before
 * the filesystem is set up in order to allow events to be traced
 * early.
 */
static __init void
__trace_early_add_events(struct trace_array *tr)
{
	struct ftrace_event_call *call;
	int ret;

	list_for_each_entry(call, &ftrace_events, list) {
		/* Early boot up should not have any modules loaded */
		if (WARN_ON_ONCE(call->mod))
			continue;

		ret = __trace_early_add_new_event(call, tr);
		if (ret < 0)
			pr_warning("Could not create early event %s\n",
				   call->name);
	}
}

/* Remove the event directory structure for a trace directory. */
static void
__trace_remove_event_dirs(struct trace_array *tr)
{
	struct ftrace_event_file *file, *next;

	list_for_each_entry_safe(file, next, &tr->events, list) {
		list_del(&file->list);
		debugfs_remove_recursive(file->dir);
		remove_subsystem(file->system);
		kmem_cache_free(file_cachep, file);
	}
}

static void
__add_event_to_tracers(struct ftrace_event_call *call,
		       struct ftrace_module_file_ops *file_ops)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (file_ops)
			__trace_add_new_event(call, tr,
					      &file_ops->id, &file_ops->enable,
					      &file_ops->filter, &file_ops->format);
		else
			__trace_add_new_event(call, tr,
					      &ftrace_event_id_fops,
					      &ftrace_enable_fops,
					      &ftrace_event_filter_fops,
					      &ftrace_event_format_fops);
	}
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call *__start_ftrace_events[];
extern struct ftrace_event_call *__stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

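/*
 * "trace_event=<list>" on the kernel command line takes a
 * comma-separated list of events in the same <subsystem>:<event>
 * syntax as the set_event file, and is applied by
 * event_trace_enable() below at core_initcall time.
 */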
static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = 1;
	tracing_selftest_disabled = 1;

	return 1;
}
__setup("trace_event=", setup_trace_event);

/* Expects to have event_mutex held when called */
static int
create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
{
	struct dentry *d_events;
	struct dentry *entry;

	entry = debugfs_create_file("set_event", 0644, parent,
				    tr, &ftrace_set_event_fops);
	if (!entry) {
		pr_warning("Could not create debugfs 'set_event' entry\n");
		return -ENOMEM;
	}

	d_events = debugfs_create_dir("events", parent);
	if (!d_events) {
		pr_warning("Could not create debugfs 'events' directory\n");
		return -ENOMEM;
	}

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  tr, &ftrace_tr_enable_fops);

	tr->event_dir = d_events;

	return 0;
}

/**
 * event_trace_add_tracer - add an instance of a trace_array to events
 * @parent: The parent dentry to place the files/directories for events in
 * @tr: The trace array associated with these events
 *
 * When a new instance is created, it needs to set up its events
 * directory, as well as other files associated with events. It also
 * creates the event hierarchy in the @parent/events directory.
 *
 * Returns 0 on success.
 */
int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
{
	int ret;

	mutex_lock(&event_mutex);

	ret = create_event_toplevel_files(parent, tr);
	if (ret)
		goto out_unlock;

	down_write(&trace_event_mutex);
	__trace_add_event_dirs(tr);
	up_write(&trace_event_mutex);

 out_unlock:
	mutex_unlock(&event_mutex);

	return ret;
}

/*
 * The top trace array already had its file descriptors created.
 * Now the files themselves need to be created.
 */
static __init int
early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
{
	int ret;

	mutex_lock(&event_mutex);

	ret = create_event_toplevel_files(parent, tr);
	if (ret)
		goto out_unlock;

	down_write(&trace_event_mutex);
	__trace_early_add_event_dirs(tr);
	up_write(&trace_event_mutex);

 out_unlock:
	mutex_unlock(&event_mutex);

	return ret;
}

int event_trace_del_tracer(struct trace_array *tr)
{
	/* Disable any running events */
	__ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);

	mutex_lock(&event_mutex);

	down_write(&trace_event_mutex);
	__trace_remove_event_dirs(tr);
	debugfs_remove_recursive(tr->event_dir);
	up_write(&trace_event_mutex);

	tr->event_dir = NULL;

	mutex_unlock(&event_mutex);

	return 0;
}

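/*
 * Create the slab caches before anything can allocate a field or file
 * descriptor.  This runs as an early_initcall(); SLAB_PANIC means a
 * failure here halts the boot, so no error handling is needed.
 */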
static __init int event_trace_memsetup(void)
{
	field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
	file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC);
	return 0;
}

8781915a
EG
1962static __init int event_trace_enable(void)
1963{
ae63b31e 1964 struct trace_array *tr = top_trace_array();
8781915a
EG
1965 struct ftrace_event_call **iter, *call;
1966 char *buf = bootup_event_buf;
1967 char *token;
1968 int ret;
1969
1970 for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
1971
1972 call = *iter;
1973 ret = event_init(call);
1974 if (!ret)
1975 list_add(&call->list, &ftrace_events);
1976 }
1977
77248221
SR
1978 /*
1979 * We need the top trace array to have a working set of trace
1980 * points at early init, before the debug files and directories
1981 * are created. Create the file entries now, and attach them
1982 * to the actual file dentries later.
1983 */
1984 __trace_early_add_events(tr);
1985
8781915a
EG
1986 while (true) {
1987 token = strsep(&buf, ",");
1988
1989 if (!token)
1990 break;
1991 if (!*token)
1992 continue;
1993
ae63b31e 1994 ret = ftrace_set_clr_event(tr, token, 1);
8781915a
EG
1995 if (ret)
1996 pr_warn("Failed to enable trace event: %s\n", token);
1997 }
81698831
SR
1998
1999 trace_printk_start_comm();
2000
8781915a
EG
2001 return 0;
2002}
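/*
 * Example, assuming bootup_event_buf is filled by the "trace_event="
 * boot parameter (via an earlier __setup("trace_event=", ...) handler
 * in this file): booting with
 *
 *	trace_event=sched:sched_switch,irq
 *
 * makes the strsep() loop above produce the tokens "sched:sched_switch"
 * and "irq", enabling one specific event and a whole subsystem before
 * any debugfs file exists.
 */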
2003
b77e38aa
SR
2004static __init int event_trace_init(void)
2005{
ae63b31e 2006 struct trace_array *tr;
b77e38aa
SR
2007 struct dentry *d_tracer;
2008 struct dentry *entry;
6d723736 2009 int ret;
b77e38aa 2010
ae63b31e
SR
2011 tr = top_trace_array();
2012
b77e38aa
SR
2013 d_tracer = tracing_init_dentry();
2014 if (!d_tracer)
2015 return 0;
2016
2314c4ae 2017 entry = debugfs_create_file("available_events", 0444, d_tracer,
ae63b31e 2018 tr, &ftrace_avail_fops);
2314c4ae
SR
2019 if (!entry)
2020 pr_warning("Could not create debugfs "
2021 "'available_events' entry\n");
2022
8728fe50
LZ
2023 if (trace_define_common_fields())
 2024 pr_warning("tracing: Failed to allocate common fields\n");
2025
77248221 2026 ret = early_event_add_tracer(d_tracer, tr);
ae63b31e
SR
2027 if (ret)
2028 return ret;
020e5f85 2029
6d723736 2030 ret = register_module_notifier(&trace_module_nb);
55379376 2031 if (ret)
6d723736
SR
2032 pr_warning("Failed to register trace events module notifier\n");
2033
b77e38aa
SR
2034 return 0;
2035}
d1a29143 2036early_initcall(event_trace_memsetup);
8781915a 2037core_initcall(event_trace_enable);
b77e38aa 2038fs_initcall(event_trace_init);
e6187007
SR
2039
2040#ifdef CONFIG_FTRACE_STARTUP_TEST
2041
2042static DEFINE_SPINLOCK(test_spinlock);
2043static DEFINE_SPINLOCK(test_spinlock_irq);
2044static DEFINE_MUTEX(test_mutex);
2045
2046static __init void test_work(struct work_struct *dummy)
2047{
2048 spin_lock(&test_spinlock);
2049 spin_lock_irq(&test_spinlock_irq);
2050 udelay(1);
2051 spin_unlock_irq(&test_spinlock_irq);
2052 spin_unlock(&test_spinlock);
2053
2054 mutex_lock(&test_mutex);
2055 msleep(1);
2056 mutex_unlock(&test_mutex);
2057}
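/*
 * test_work() above intentionally exercises a plain spinlock, an
 * irq-disabling spinlock, a mutex and a short sleep, so that locking,
 * irq and scheduler tracepoints all get a chance to fire while the
 * event under test is enabled.
 */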
2058
2059static __init int event_test_thread(void *unused)
2060{
2061 void *test_malloc;
2062
2063 test_malloc = kmalloc(1234, GFP_KERNEL);
2064 if (!test_malloc)
2065 pr_info("failed to kmalloc\n");
2066
2067 schedule_on_each_cpu(test_work);
2068
2069 kfree(test_malloc);
2070
2071 set_current_state(TASK_INTERRUPTIBLE);
2072 while (!kthread_should_stop())
2073 schedule();
2074
2075 return 0;
2076}
2077
2078/*
2079 * Do various things that may trigger events.
2080 */
2081static __init void event_test_stuff(void)
2082{
2083 struct task_struct *test_thread;
2084
2085 test_thread = kthread_run(event_test_thread, NULL, "test-events");
2086 msleep(1);
2087 kthread_stop(test_thread);
2088}
2089
2090/*
 2091 * For every trace event defined, we test each trace point individually,
 2092 * then by subsystem group, and finally all trace points together.
2093 */
9ea21c1e 2094static __init void event_trace_self_tests(void)
e6187007 2095{
ae63b31e
SR
2096 struct ftrace_subsystem_dir *dir;
2097 struct ftrace_event_file *file;
e6187007
SR
2098 struct ftrace_event_call *call;
2099 struct event_subsystem *system;
ae63b31e 2100 struct trace_array *tr;
e6187007
SR
2101 int ret;
2102
ae63b31e
SR
2103 tr = top_trace_array();
2104
e6187007
SR
2105 pr_info("Running tests on trace events:\n");
2106
ae63b31e
SR
2107 list_for_each_entry(file, &tr->events, list) {
2108
2109 call = file->event_call;
e6187007 2110
2239291a
SR
2111 /* Only test those that have a probe */
2112 if (!call->class || !call->class->probe)
e6187007
SR
2113 continue;
2114
1f5a6b45
SR
2115/*
 2116 * Testing syscall events here is pretty useless, but we still do it
 2117 * if configured. It is time consuming, though.
 2118 * What we really need is a user thread to perform the
 2119 * syscalls as we test.
2120 */
2121#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
8f082018
SR
2122 if (call->class->system &&
2123 strcmp(call->class->system, "syscalls") == 0)
1f5a6b45
SR
2124 continue;
2125#endif
2126
e6187007
SR
2127 pr_info("Testing event %s: ", call->name);
2128
2129 /*
2130 * If an event is already enabled, someone is using
2131 * it and the self test should not be on.
2132 */
ae63b31e 2133 if (file->flags & FTRACE_EVENT_FL_ENABLED) {
e6187007
SR
2134 pr_warning("Enabled event during self test!\n");
2135 WARN_ON_ONCE(1);
2136 continue;
2137 }
2138
ae63b31e 2139 ftrace_event_enable_disable(file, 1);
e6187007 2140 event_test_stuff();
ae63b31e 2141 ftrace_event_enable_disable(file, 0);
e6187007
SR
2142
2143 pr_cont("OK\n");
2144 }
2145
2146 /* Now test at the sub system level */
2147
2148 pr_info("Running tests on trace event systems:\n");
2149
ae63b31e
SR
2150 list_for_each_entry(dir, &tr->systems, list) {
2151
2152 system = dir->subsystem;
e6187007
SR
2153
2154 /* the ftrace system is special, skip it */
2155 if (strcmp(system->name, "ftrace") == 0)
2156 continue;
2157
2158 pr_info("Testing event system %s: ", system->name);
2159
ae63b31e 2160 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
e6187007
SR
2161 if (WARN_ON_ONCE(ret)) {
2162 pr_warning("error enabling system %s\n",
2163 system->name);
2164 continue;
2165 }
2166
2167 event_test_stuff();
2168
ae63b31e 2169 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
76bab1b7 2170 if (WARN_ON_ONCE(ret)) {
e6187007
SR
2171 pr_warning("error disabling system %s\n",
2172 system->name);
76bab1b7
YL
2173 continue;
2174 }
e6187007
SR
2175
2176 pr_cont("OK\n");
2177 }
2178
2179 /* Test with all events enabled */
2180
2181 pr_info("Running tests on all trace events:\n");
2182 pr_info("Testing all events: ");
2183
ae63b31e 2184 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
e6187007 2185 if (WARN_ON_ONCE(ret)) {
e6187007 2186 pr_warning("error enabling all events\n");
9ea21c1e 2187 return;
e6187007
SR
2188 }
2189
2190 event_test_stuff();
2191
2192 /* reset sysname */
ae63b31e 2193 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
e6187007
SR
2194 if (WARN_ON_ONCE(ret)) {
2195 pr_warning("error disabling all events\n");
9ea21c1e 2196 return;
e6187007
SR
2197 }
2198
2199 pr_cont("OK\n");
9ea21c1e
SR
2200}
2201
2202#ifdef CONFIG_FUNCTION_TRACER
2203
245b2e70 2204static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
9ea21c1e
SR
2205
2206static void
2f5f6ad9 2207function_test_events_call(unsigned long ip, unsigned long parent_ip,
a1e2e31d 2208 struct ftrace_ops *op, struct pt_regs *pt_regs)
9ea21c1e
SR
2209{
2210 struct ring_buffer_event *event;
e77405ad 2211 struct ring_buffer *buffer;
9ea21c1e
SR
2212 struct ftrace_entry *entry;
2213 unsigned long flags;
2214 long disabled;
9ea21c1e
SR
2215 int cpu;
2216 int pc;
2217
2218 pc = preempt_count();
5168ae50 2219 preempt_disable_notrace();
9ea21c1e 2220 cpu = raw_smp_processor_id();
245b2e70 2221 disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
9ea21c1e
SR
2222
2223 if (disabled != 1)
2224 goto out;
2225
2226 local_save_flags(flags);
2227
e77405ad
SR
2228 event = trace_current_buffer_lock_reserve(&buffer,
2229 TRACE_FN, sizeof(*entry),
9ea21c1e
SR
2230 flags, pc);
2231 if (!event)
2232 goto out;
2233 entry = ring_buffer_event_data(event);
2234 entry->ip = ip;
2235 entry->parent_ip = parent_ip;
2236
0d5c6e1c 2237 trace_buffer_unlock_commit(buffer, event, flags, pc);
9ea21c1e
SR
2238
2239 out:
245b2e70 2240 atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
5168ae50 2241 preempt_enable_notrace();
9ea21c1e
SR
2242}
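/*
 * A note on the guard above: ftrace_test_event_disable is the usual
 * per-CPU recursion counter for function-trace callbacks.  Only the
 * outermost entry on a given CPU (counter value 1) writes the event;
 * any nested invocation, e.g. from a tracepoint hit while writing,
 * bails out early:
 *
 *	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
 *	if (disabled != 1)
 *		goto out;
 */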
2243
2244static struct ftrace_ops trace_ops __initdata =
2245{
2246 .func = function_test_events_call,
4740974a 2247 .flags = FTRACE_OPS_FL_RECURSION_SAFE,
9ea21c1e
SR
2248};
2249
2250static __init void event_trace_self_test_with_function(void)
2251{
17bb615a
SR
2252 int ret;
2253 ret = register_ftrace_function(&trace_ops);
2254 if (WARN_ON(ret < 0)) {
2255 pr_info("Failed to enable function tracer for event tests\n");
2256 return;
2257 }
9ea21c1e
SR
2258 pr_info("Running tests again, along with the function tracer\n");
2259 event_trace_self_tests();
2260 unregister_ftrace_function(&trace_ops);
2261}
2262#else
2263static __init void event_trace_self_test_with_function(void)
2264{
2265}
2266#endif
2267
2268static __init int event_trace_self_tests_init(void)
2269{
020e5f85
LZ
2270 if (!tracing_selftest_disabled) {
2271 event_trace_self_tests();
2272 event_trace_self_test_with_function();
2273 }
e6187007
SR
2274
2275 return 0;
2276}
2277
28d20e2d 2278late_initcall(event_trace_self_tests_init);
e6187007
SR
2279
2280#endif