tracing: Use TRACE_MAX_PRINT instead of constant
kernel/trace/trace_events.c (deliverable/linux.git)
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

DEFINE_MUTEX(event_storage_mutex);
EXPORT_SYMBOL_GPL(event_storage_mutex);

char event_storage[EVENT_STORAGE_SIZE];
EXPORT_SYMBOL_GPL(event_storage);

LIST_HEAD(ftrace_events);
LIST_HEAD(ftrace_common_fields);

#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;

/* Double loops, do not use break, only goto's work */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct ftrace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()	\
	}

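/*
 * Illustrative use of the double-loop macros above (a sketch;
 * "some_call" is hypothetical -- see trace_event_enable_cmd_record()
 * below for a real caller):
 *
 *	struct trace_array *tr;
 *	struct ftrace_event_file *file;
 *
 *	do_for_each_event_file(tr, file) {
 *		if (file->event_call == some_call)
 *			break;	(only moves on to the next trace_array)
 *	} while_for_each_event_file();
 */
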
struct list_head *
trace_get_fields(struct ftrace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}

static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct ftrace_event_field *field;

	field = kmem_cache_alloc(field_cachep, GFP_TRACE);
	if (!field)
		return -ENOMEM;

	field->name = name;
	field->type = type;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, head);

	return 0;
}

int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);

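/*
 * Illustrative call (the struct and field names here are hypothetical,
 * not from this file):
 *
 *	struct my_entry { struct trace_entry ent; int my_val; };
 *
 *	trace_define_field(call, "int", "my_val",
 *			   offsetof(struct my_entry, my_val),
 *			   sizeof(int), is_signed_type(int), FILTER_OTHER);
 */
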
#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

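/*
 * For example, __common_field(int, pid) below expands (roughly) to:
 *
 *	ret = __trace_define_field(&ftrace_common_fields, "int",
 *				   "common_pid",
 *				   offsetof(struct trace_entry, pid),
 *				   sizeof(ent.pid),
 *				   is_signed_type(int), FILTER_OTHER);
 *	if (ret)
 *		return ret;
 */
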
static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

	return ret;
}

void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kmem_cache_free(field_cachep, field);
	}
}

int trace_event_raw_init(struct ftrace_event_call *call)
{
	int id;

	id = register_ftrace_event(&call->event);
	if (!id)
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

int ftrace_event_reg(struct ftrace_event_call *call,
		     enum trace_reg type, void *data)
{
	struct ftrace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->probe,
						 file);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->probe,
					    file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->perf_probe,
					    call);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ftrace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
	struct ftrace_event_file *file;
	struct trace_array *tr;

	mutex_lock(&event_mutex);
	do_for_each_event_file(tr, file) {

		if (!(file->flags & FTRACE_EVENT_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
		} else {
			tracing_stop_cmdline_record();
			clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
		}
	} while_for_each_event_file();
	mutex_unlock(&event_mutex);
}

static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
					 int enable, int soft_disable)
{
	struct ftrace_event_call *call = file->event_call;
	int ret = 0;
	int disable;

	switch (enable) {
	case 0:
		/*
		 * When soft_disable is set and enable is cleared, we want
		 * to clear the SOFT_DISABLED flag but leave the event in the
		 * state that it was. That is, if the event was enabled and
		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
		 * is set we do not want the event to be enabled before we
		 * clear the bit.
		 *
		 * When soft_disable is not set but the SOFT_MODE flag is,
		 * we do nothing. Do not disable the tracepoint, otherwise
		 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
		 */
		if (soft_disable) {
			disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
			clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
		} else
			disable = !(file->flags & FTRACE_EVENT_FL_SOFT_MODE);

		if (disable && (file->flags & FTRACE_EVENT_FL_ENABLED)) {
			clear_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
			if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
			}
			call->class->reg(call, TRACE_REG_UNREGISTER, file);
		}
		/* If in SOFT_MODE, just set the SOFT_DISABLE_BIT */
		if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
			set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
		break;
	case 1:
		/*
		 * When soft_disable is set and enable is set, we want to
		 * register the tracepoint for the event, but leave the event
		 * as is. That means, if the event was already enabled, we do
		 * nothing (but set SOFT_MODE). If the event is disabled, we
		 * set SOFT_DISABLED before enabling the event tracepoint, so
		 * it still seems to be disabled.
		 */
		if (!soft_disable)
			clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
		else
			set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);

		if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {

			/* Keep the event disabled, when going to SOFT_MODE. */
			if (soft_disable)
				set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);

			if (trace_flags & TRACE_ITER_RECORD_CMD) {
				tracing_start_cmdline_record();
				set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
			}
			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event %s\n",
					call->name);
				break;
			}
			set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);

			/* WAS_ENABLED gets set but never cleared. */
			call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
		}
		break;
	}

	return ret;
}

static int ftrace_event_enable_disable(struct ftrace_event_file *file,
				       int enable)
{
	return __ftrace_event_enable_disable(file, enable, 0);
}

static void ftrace_clear_events(struct trace_array *tr)
{
	struct ftrace_event_file *file;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		ftrace_event_enable_disable(file, 0);
	}
	mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system->ref_count == 0);
	if (--system->ref_count)
		return;

	list_del(&system->list);

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system->ref_count == 0);
	system->ref_count++;
}

static void __get_system_dir(struct ftrace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	dir->ref_count++;
	__get_system(dir->subsystem);
}

static void __put_system_dir(struct ftrace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	/* If the subsystem is about to be freed, the dir must be too */
	WARN_ON_ONCE(dir->subsystem->ref_count == 1 && dir->ref_count != 1);

	__put_system(dir->subsystem);
	if (!--dir->ref_count)
		kfree(dir);
}

static void put_system(struct ftrace_subsystem_dir *dir)
{
	mutex_lock(&event_mutex);
	__put_system_dir(dir);
	mutex_unlock(&event_mutex);
}

/*
 * __ftrace_set_clr_event(tr, NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
				  const char *sub, const char *event, int set)
{
	struct ftrace_event_file *file;
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;

		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(file, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}

static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(tr, match, sub, event, set);
}

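/*
 * For example, all of the following are accepted forms when written
 * to the "set_event" file (illustrative):
 *
 *	echo sched:sched_switch > set_event
 *	echo 'sched:*' > set_event
 *	echo sched_switch > set_event
 *	echo '!sched:*' > set_event	(a leading '!' clears events; see
 *					 ftrace_event_write() below)
 */
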
/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	struct trace_array *tr = top_trace_array();

	return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);

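/*
 * Illustrative in-kernel usage (not from this file):
 *
 *	trace_set_clr_event("sched", "sched_switch", 1);  enable one event
 *	trace_set_clr_event("sched", NULL, 1);            enable the subsystem
 */
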
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	struct seq_file *m = file->private_data;
	struct trace_array *tr = m->private;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded(&parser)) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_file *file = v;
	struct ftrace_event_call *call;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		call = file->event_call;
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg)
			return file;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	/*
	 * Start at the list head itself; t_next() advances to the first
	 * real entry via list_for_each_entry_continue().
	 */
	file = list_entry(&tr->events, struct ftrace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = t_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_file *file = v;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		if (file->flags & FTRACE_EVENT_FL_ENABLED)
			return file;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct ftrace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = s_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_file *file = v;
	struct ftrace_event_call *call = file->event_call;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_file *file = filp->private_data;
	char *buf;

	if (file->flags & FTRACE_EVENT_FL_ENABLED) {
		if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED)
			buf = "0*\n";	/* enabled, but soft disabled */
		else
			buf = "1\n";
	} else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_file *file = filp->private_data;
	unsigned long val;
	int ret;

	if (!file)
		return -EINVAL;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ret = ftrace_event_enable_disable(file, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct ftrace_event_call *call;
	struct ftrace_event_file *file;
	struct trace_array *tr = dir->tr;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		call = file->event_call;
		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * We need to find out if all the events are set,
		 * or if all events are cleared, or if we have a
		 * mixture: bit 0 is set if any event is disabled,
		 * bit 1 if any is enabled, giving an index into
		 * set_to_char[] above.
		 */
		set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	const char *name = NULL;
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/*
	 * Opening of "enable" adds a ref count to system,
	 * so the name is safe to use.
	 */
	if (system)
		name = system->name;

	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

 out:
	*ppos += cnt;

	return ret;
}

enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};

static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		if (unlikely(list_empty(common_head)))
			return NULL;

		field = list_entry(common_head->prev,
				   struct ftrace_event_field, link);
		return field;

	case FORMAT_FIELD_SEPERATOR:
		if (unlikely(list_empty(head)))
			return NULL;

		field = list_entry(head->prev, struct ftrace_event_field, link);
		return field;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	field = v;
	if (field->link.prev == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (field->link.prev == head)
		return (void *)FORMAT_PRINTFMT;

	field = list_entry(field->link.prev, struct ftrace_event_field, link);

	return field;
}

static void *f_start(struct seq_file *m, loff_t *pos)
{
	loff_t l = 0;
	void *p;

	/* Start by showing the header */
	if (!*pos)
		return (void *)FORMAT_HEADER;

	p = (void *)FORMAT_HEADER;
	do {
		p = f_next(m, p, &l);
	} while (p && l < *pos);

	return p;
}

static int f_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", call->name);
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_printf(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	field = v;

	/*
	 * Smartly shows the array type (except dynamic arrays).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown:
	 *	field:TYPE VAR[LEN]
	 */
	array_descriptor = strchr(field->type, '[');

	if (!strncmp(field->type, "__data_loc", 10))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   array_descriptor, field->offset,
			   field->size, !!field->is_signed);

	return 0;
}

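/*
 * Sample "format" file output produced by the seq handlers above
 * (abridged and illustrative; the ID and field list vary per event):
 *
 *	name: sched_switch
 *	ID: <event type id>
 *	format:
 *		field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *		field:unsigned char common_flags;	offset:2;	size:1;	signed:0;
 *		...
 *
 *	print fmt: ...
 */
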
static void f_stop(struct seq_file *m, void *p)
{
}

static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
	struct ftrace_event_call *call = inode->i_private;
	struct seq_file *m;
	int ret;

	ret = seq_open(file, &trace_format_seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = call;

	return 0;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->event.type);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

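/*
 * Illustrative filter strings accepted by the write handler above
 * (parsed by apply_event_filter()):
 *
 *	echo 'common_pid == 0' > events/sched/sched_switch/filter
 *	echo 'next_pid > 100 && prev_comm ~ "bash"' > filter
 *	echo 0 > filter		(clears the filter)
 */
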
static LIST_HEAD(event_subsystems);

static int subsystem_open(struct inode *inode, struct file *filp)
{
	struct event_subsystem *system = NULL;
	struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */
	struct trace_array *tr;
	int ret;

	/* Make sure the system still exists */
	mutex_lock(&event_mutex);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		list_for_each_entry(dir, &tr->systems, list) {
			if (dir == inode->i_private) {
				/* Don't open systems with no events */
				if (dir->nr_events) {
					__get_system_dir(dir);
					system = dir->subsystem;
				}
				goto exit_loop;
			}
		}
	}
 exit_loop:
	mutex_unlock(&event_mutex);

	if (!system)
		return -ENODEV;

	/* Some versions of gcc think dir can be uninitialized here */
	WARN_ON(!dir);

	ret = tracing_open_generic(inode, filp);
	if (ret < 0)
		put_system(dir);

	return ret;
}

static int system_tr_open(struct inode *inode, struct file *filp)
{
	struct ftrace_subsystem_dir *dir;
	struct trace_array *tr = inode->i_private;
	int ret;

	/* Make a temporary dir that has no system but points to tr */
	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		return -ENOMEM;

	dir->tr = tr;

	ret = tracing_open_generic(inode, filp);
	if (ret < 0) {
		kfree(dir);
		return ret;
	}

	filp->private_data = dir;

	return ret;
}

static int subsystem_release(struct inode *inode, struct file *file)
{
	struct ftrace_subsystem_dir *dir = file->private_data;

	/*
	 * If dir->subsystem is NULL, then this is a temporary
	 * descriptor that was made for a trace_array to enable
	 * all subsystems.
	 */
	if (dir->subsystem)
		put_system(dir);
	else
		kfree(dir);

	return 0;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(dir, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static int ftrace_event_avail_open(struct inode *inode, struct file *file);
static int ftrace_event_set_open(struct inode *inode, struct file *file);

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_set_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = trace_format_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = subsystem_open,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = subsystem_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_tr_enable_fops = {
	.open = system_tr_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
	.llseek = default_llseek,
};

static int
ftrace_event_open(struct inode *inode, struct file *file,
		  const struct seq_operations *seq_ops)
{
	struct seq_file *m;
	int ret;

	ret = seq_open(file, seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	/* copy tr over to seq ops */
	m->private = inode->i_private;

	return ret;
}

static int
ftrace_event_avail_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_event_seq_ops;

	return ftrace_event_open(inode, file, seq_ops);
}

static int
ftrace_event_set_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_set_event_seq_ops;
	struct trace_array *tr = inode->i_private;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events(tr);

	return ftrace_event_open(inode, file, seq_ops);
}

static struct event_subsystem *
create_new_subsystem(const char *name)
{
	struct event_subsystem *system;

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system)
		return NULL;

	system->ref_count = 1;
	system->name = name;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter)
		goto out_free;

	list_add(&system->list, &event_subsystems);

	return system;

 out_free:
	kfree(system);
	return NULL;
}

static struct dentry *
event_subsystem_dir(struct trace_array *tr, const char *name,
		    struct ftrace_event_file *file, struct dentry *parent)
{
	struct ftrace_subsystem_dir *dir;
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we already created this dir */
	list_for_each_entry(dir, &tr->systems, list) {
		system = dir->subsystem;
		if (strcmp(system->name, name) == 0) {
			dir->nr_events++;
			file->system = dir;
			return dir->entry;
		}
	}

	/* Now see if the system itself exists. */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			break;
	}
	/* Reset system variable when not found */
	if (&system->list == &event_subsystems)
		system = NULL;

	dir = kmalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		goto out_fail;

	if (!system) {
		system = create_new_subsystem(name);
		if (!system)
			goto out_free;
	} else
		__get_system(system);

	dir->entry = debugfs_create_dir(name, parent);
	if (!dir->entry) {
		pr_warning("Failed to create system directory %s\n", name);
		__put_system(system);
		goto out_free;
	}

	dir->tr = tr;
	dir->ref_count = 1;
	dir->nr_events = 1;
	dir->subsystem = system;
	file->system = dir;

	entry = debugfs_create_file("filter", 0644, dir->entry, dir,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs '%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, dir->entry, dir,
			  &ftrace_system_enable_fops);

	list_add(&dir->list, &tr->systems);

	return dir->entry;

 out_free:
	kfree(dir);
 out_fail:
	/* Only print this message if failed on memory allocation */
	if (!dir || !system)
		pr_warning("No memory to create event subsystem %s\n",
			   name);
	return NULL;
}

static int
event_create_dir(struct dentry *parent,
		 struct ftrace_event_file *file,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct ftrace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	struct list_head *head;
	struct dentry *d_events;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
		d_events = event_subsystem_dir(tr, call->class->system, file, parent);
		if (!d_events)
			return -ENOMEM;
	} else
		d_events = parent;

	file->dir = debugfs_create_dir(call->name, d_events);
	if (!file->dir) {
		pr_warning("Could not create debugfs '%s' directory\n",
			   call->name);
		return -1;
	}

	if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
		trace_create_file("enable", 0644, file->dir, file,
				  enable);

#ifdef CONFIG_PERF_EVENTS
	if (call->event.type && call->class->reg)
		trace_create_file("id", 0444, file->dir, call,
				  id);
#endif

	/*
	 * Other events may have the same class. Only update
	 * the fields if they are not already defined.
	 */
	head = trace_get_fields(call);
	if (list_empty(head)) {
		ret = call->class->define_fields(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace point events/%s\n",
				   call->name);
			return -1;
		}
	}
	trace_create_file("filter", 0644, file->dir, call,
			  filter);

	trace_create_file("format", 0444, file->dir, call,
			  format);

	return 0;
}

static void remove_subsystem(struct ftrace_subsystem_dir *dir)
{
	if (!dir)
		return;

	if (!--dir->nr_events) {
		debugfs_remove_recursive(dir->entry);
		list_del(&dir->list);
		__put_system_dir(dir);
	}
}

static void remove_event_from_tracers(struct ftrace_event_call *call)
{
	struct ftrace_event_file *file;
	struct trace_array *tr;

	do_for_each_event_file_safe(tr, file) {

		if (file->event_call != call)
			continue;

		list_del(&file->list);
		debugfs_remove_recursive(file->dir);
		remove_subsystem(file->system);
		kmem_cache_free(file_cachep, file);

		/*
		 * The do_for_each_event_file_safe() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();
}

static void event_remove(struct ftrace_event_call *call)
{
	struct trace_array *tr;
	struct ftrace_event_file *file;

	do_for_each_event_file(tr, file) {
		if (file->event_call != call)
			continue;
		ftrace_event_enable_disable(file, 0);
		/*
		 * The do_for_each_event_file() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();

	if (call->event.funcs)
		__unregister_ftrace_event(&call->event);
	remove_event_from_tracers(call);
	list_del(&call->list);
}

static int event_init(struct ftrace_event_call *call)
{
	int ret = 0;

	if (WARN_ON(!call->name))
		return -EINVAL;

	if (call->class->raw_init) {
		ret = call->class->raw_init(call);
		if (ret < 0 && ret != -ENOSYS)
			pr_warn("Could not initialize trace events/%s\n",
				call->name);
	}

	return ret;
}

static int
__register_event(struct ftrace_event_call *call, struct module *mod)
{
	int ret;

	ret = event_init(call);
	if (ret < 0)
		return ret;

	list_add(&call->list, &ftrace_events);
	call->mod = mod;

	return 0;
}

/* Add an event to a trace directory */
static int
__trace_add_new_event(struct ftrace_event_call *call,
		      struct trace_array *tr,
		      const struct file_operations *id,
		      const struct file_operations *enable,
		      const struct file_operations *filter,
		      const struct file_operations *format)
{
	struct ftrace_event_file *file;

	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
	if (!file)
		return -ENOMEM;

	file->event_call = call;
	file->tr = tr;
	list_add(&file->list, &tr->events);

	return event_create_dir(tr->event_dir, file, id, enable, filter, format);
}

/*
 * Just create a descriptor for early init. A descriptor is required
 * for enabling events at boot. We want to enable events before
 * the filesystem is initialized.
 */
static __init int
__trace_early_add_new_event(struct ftrace_event_call *call,
			    struct trace_array *tr)
{
	struct ftrace_event_file *file;

	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
	if (!file)
		return -ENOMEM;

	file->event_call = call;
	file->tr = tr;
	list_add(&file->list, &tr->events);

	return 0;
}

struct ftrace_module_file_ops;
static void __add_event_to_tracers(struct ftrace_event_call *call,
				   struct ftrace_module_file_ops *file_ops);

/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
	int ret;

	mutex_lock(&event_mutex);

	ret = __register_event(call, NULL);
	if (ret >= 0)
		__add_event_to_tracers(call, NULL);

	mutex_unlock(&event_mutex);
	return ret;
}

/*
 * Must be called under locking both of event_mutex and trace_event_mutex.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
	event_remove(call);
	trace_destroy_fields(call);
	destroy_preds(call);
}

/* Remove an event_call */
void trace_remove_event_call(struct ftrace_event_call *call)
{
	mutex_lock(&event_mutex);
	down_write(&trace_event_mutex);
	__trace_remove_event_call(call);
	up_write(&trace_event_mutex);
	mutex_unlock(&event_mutex);
}

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head list;
	struct module *mod;
	struct file_operations id;
	struct file_operations enable;
	struct file_operations format;
	struct file_operations filter;
};

static struct ftrace_module_file_ops *
find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
{
	/*
	 * As event_calls are added in groups by module,
	 * when we find one file_ops, we don't need to search for
	 * each call in that module, as the rest should be the
	 * same. Only search for a new one if the last one did
	 * not match.
	 */
	if (file_ops && mod == file_ops->mod)
		return file_ops;

	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			return file_ops;
	}
	return NULL;
}

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}

static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call **call, **start, **end;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	file_ops = trace_create_file_ops(mod);
	if (!file_ops)
		return;

	for_each_event(call, start, end) {
		__register_event(*call, mod);
		__add_event_to_tracers(*call, file_ops);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool clear_trace = false;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
				clear_trace = true;
			__trace_remove_event_call(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}
	up_write(&trace_event_mutex);

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events that were used. The only worry is if
	 * a new module gets loaded, and takes on the same id as the events
	 * of this module. When printing out the buffer, traced events left
	 * over from this module may be passed to the new module events and
	 * unexpected results may occur.
	 */
	if (clear_trace)
		tracing_reset_all_online_cpus();
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}

static int
__trace_add_new_mod_event(struct ftrace_event_call *call,
			  struct trace_array *tr,
			  struct ftrace_module_file_ops *file_ops)
{
	return __trace_add_new_event(call, tr,
				     &file_ops->id, &file_ops->enable,
				     &file_ops->filter, &file_ops->format);
}

#else
static inline struct ftrace_module_file_ops *
find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
{
	return NULL;
}
static inline int trace_module_notify(struct notifier_block *self,
				      unsigned long val, void *data)
{
	return 0;
}
static inline int
__trace_add_new_mod_event(struct ftrace_event_call *call,
			  struct trace_array *tr,
			  struct ftrace_module_file_ops *file_ops)
{
	return -ENODEV;
}
#endif /* CONFIG_MODULES */

/* Create a new event directory structure for a trace directory. */
static void
__trace_add_event_dirs(struct trace_array *tr)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call;
	int ret;

	list_for_each_entry(call, &ftrace_events, list) {
		if (call->mod) {
			/*
			 * Directories for events by modules need to
			 * keep module ref counts when opened (as we don't
			 * want the module to disappear when reading one
			 * of these files). The file_ops keep account of
			 * the module ref count.
			 */
			file_ops = find_ftrace_file_ops(file_ops, call->mod);
			if (!file_ops)
				continue; /* Warn? */
			ret = __trace_add_new_mod_event(call, tr, file_ops);
			if (ret < 0)
				pr_warning("Could not create directory for event %s\n",
					   call->name);
			continue;
		}
		ret = __trace_add_new_event(call, tr,
					    &ftrace_event_id_fops,
					    &ftrace_enable_fops,
					    &ftrace_event_filter_fops,
					    &ftrace_event_format_fops);
		if (ret < 0)
			pr_warning("Could not create directory for event %s\n",
				   call->name);
	}
}

#ifdef CONFIG_DYNAMIC_FTRACE

/* Avoid typos */
#define ENABLE_EVENT_STR	"enable_event"
#define DISABLE_EVENT_STR	"disable_event"

struct event_probe_data {
	struct ftrace_event_file *file;
	unsigned long count;
	int ref;
	bool enable;
};

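/*
 * These probes back the set_ftrace_filter event triggers, e.g.
 * (illustrative):
 *
 *	echo 'schedule:enable_event:sched:sched_switch' > set_ftrace_filter
 *	echo 'schedule:disable_event:sched:sched_switch:5' > set_ftrace_filter
 *
 * The optional trailing number is parsed into event_probe_data.count
 * by event_enable_func() below.
 */
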
static struct ftrace_event_file *
find_event_file(struct trace_array *tr, const char *system, const char *event)
{
	struct ftrace_event_file *file;
	struct ftrace_event_call *call;

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;

		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (strcmp(event, call->name) == 0 &&
		    strcmp(system, call->class->system) == 0)
			return file;
	}
	return NULL;
}

static void
event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
{
	struct event_probe_data **pdata = (struct event_probe_data **)_data;
	struct event_probe_data *data = *pdata;

	if (!data)
		return;

	if (data->enable)
		clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
	else
		set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
}

static void
event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
{
	struct event_probe_data **pdata = (struct event_probe_data **)_data;
	struct event_probe_data *data = *pdata;

	if (!data)
		return;

	if (!data->count)
		return;

	/* Skip if the event is in a state we want to switch to */
	if (data->enable == !(data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
		return;

	if (data->count != -1)
		(data->count)--;

	event_enable_probe(ip, parent_ip, _data);
}

static int
event_enable_print(struct seq_file *m, unsigned long ip,
		   struct ftrace_probe_ops *ops, void *_data)
{
	struct event_probe_data *data = _data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_printf(m, "%s:%s:%s",
		   data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
		   data->file->event_call->class->system,
		   data->file->event_call->name);

	if (data->count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", data->count);

	return 0;
}

static int
event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip,
		  void **_data)
{
	struct event_probe_data **pdata = (struct event_probe_data **)_data;
	struct event_probe_data *data = *pdata;

	data->ref++;
	return 0;
}

static void
event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip,
		  void **_data)
{
	struct event_probe_data **pdata = (struct event_probe_data **)_data;
	struct event_probe_data *data = *pdata;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		/* Remove the SOFT_MODE flag */
		__ftrace_event_enable_disable(data->file, 0, 1);
		module_put(data->file->event_call->mod);
		kfree(data);
	}
	*pdata = NULL;
}

static struct ftrace_probe_ops event_enable_probe_ops = {
	.func = event_enable_probe,
	.print = event_enable_print,
	.init = event_enable_init,
	.free = event_enable_free,
};

static struct ftrace_probe_ops event_enable_count_probe_ops = {
	.func = event_enable_count_probe,
	.print = event_enable_print,
	.init = event_enable_init,
	.free = event_enable_free,
};

static struct ftrace_probe_ops event_disable_probe_ops = {
	.func = event_enable_probe,
	.print = event_enable_print,
	.init = event_enable_init,
	.free = event_enable_free,
};

static struct ftrace_probe_ops event_disable_count_probe_ops = {
	.func = event_enable_count_probe,
	.print = event_enable_print,
	.init = event_enable_init,
	.free = event_enable_free,
};

static int
event_enable_func(struct ftrace_hash *hash,
		  char *glob, char *cmd, char *param, int enabled)
{
	struct trace_array *tr = top_trace_array();
	struct ftrace_event_file *file;
	struct ftrace_probe_ops *ops;
	struct event_probe_data *data;
	const char *system;
	const char *event;
	char *number;
	bool enable;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enabled)
		return -EINVAL;

	if (!param)
		return -EINVAL;

	system = strsep(&param, ":");
	if (!param)
		return -EINVAL;

	event = strsep(&param, ":");

	mutex_lock(&event_mutex);

	ret = -EINVAL;
	file = find_event_file(tr, system, event);
	if (!file)
		goto out;

	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;

	if (enable)
		ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
	else
		ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		ret = 0;
		goto out;
	}

	ret = -ENOMEM;
	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out;

	data->enable = enable;
	data->count = -1;
	data->file = file;

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	ret = -EINVAL;
	if (!strlen(number))
		goto out_free;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, &data->count);
	if (ret)
		goto out_free;

 out_reg:
	/* Don't let event modules unload while probe registered */
	ret = try_module_get(file->event_call->mod);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
	}

	ret = __ftrace_event_enable_disable(file, 1, 1);
	if (ret < 0)
		goto out_put;
	ret = register_ftrace_function_probe(glob, ops, data);
	/*
	 * register_ftrace_function_probe() returns the number of functions
	 * it enabled on success and zero if it matched none; treat matching
	 * no functions as a failure too, and return zero (not the count)
	 * to the caller.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_disable;
	} else if (ret < 0)
		goto out_disable;
	ret = 0;
 out:
	mutex_unlock(&event_mutex);
	return ret;

 out_disable:
	__ftrace_event_enable_disable(file, 0, 1);
 out_put:
	module_put(file->event_call->mod);
 out_free:
	kfree(data);
	goto out;
}

static struct ftrace_func_command event_enable_cmd = {
	.name = ENABLE_EVENT_STR,
	.func = event_enable_func,
};

static struct ftrace_func_command event_disable_cmd = {
	.name = DISABLE_EVENT_STR,
	.func = event_enable_func,
};

static __init int register_event_cmds(void)
{
	int ret;

	ret = register_ftrace_command(&event_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_ftrace_command(&event_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_ftrace_command(&event_enable_cmd);
	return ret;
}
#else
static inline int register_event_cmds(void) { return 0; }
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * The top level array has already had its ftrace_event_file
 * descriptors created in order to allow for early events to
 * be recorded. This function is called after the debugfs has been
 * initialized, and we now have to create the files associated
 * to the events.
 */
static __init void
__trace_early_add_event_dirs(struct trace_array *tr)
{
	struct ftrace_event_file *file;
	int ret;

	list_for_each_entry(file, &tr->events, list) {
		ret = event_create_dir(tr->event_dir, file,
				       &ftrace_event_id_fops,
				       &ftrace_enable_fops,
				       &ftrace_event_filter_fops,
				       &ftrace_event_format_fops);
		if (ret < 0)
			pr_warning("Could not create directory for event %s\n",
				   file->event_call->name);
	}
}

/*
 * For early boot up, the top trace array needs a list of events
 * that can be enabled. This must be done before the filesystem
 * is set up in order to allow events to be traced early.
 */
static __init void
__trace_early_add_events(struct trace_array *tr)
{
	struct ftrace_event_call *call;
	int ret;

	list_for_each_entry(call, &ftrace_events, list) {
		/* Early boot up should not have any modules loaded */
		if (WARN_ON_ONCE(call->mod))
			continue;

		ret = __trace_early_add_new_event(call, tr);
		if (ret < 0)
			pr_warning("Could not create early event %s\n",
				   call->name);
	}
}

/* Remove the event directory structure for a trace directory. */
static void
__trace_remove_event_dirs(struct trace_array *tr)
{
	struct ftrace_event_file *file, *next;

	list_for_each_entry_safe(file, next, &tr->events, list) {
		list_del(&file->list);
		debugfs_remove_recursive(file->dir);
		remove_subsystem(file->system);
		kmem_cache_free(file_cachep, file);
	}
}

static void
__add_event_to_tracers(struct ftrace_event_call *call,
		       struct ftrace_module_file_ops *file_ops)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (file_ops)
			__trace_add_new_mod_event(call, tr, file_ops);
		else
			__trace_add_new_event(call, tr,
					      &ftrace_event_id_fops,
					      &ftrace_enable_fops,
					      &ftrace_event_filter_fops,
					      &ftrace_event_format_fops);
	}
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call *__start_ftrace_events[];
extern struct ftrace_event_call *__stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = true;
	tracing_selftest_disabled = true;

	return 1;
}
__setup("trace_event=", setup_trace_event);

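/*
 * For example, booting with (illustrative):
 *
 *	trace_event=sched:sched_switch,sched:sched_wakeup
 *
 * stores the comma-separated list in bootup_event_buf;
 * event_trace_enable() below parses it and enables each event
 * before the filesystem is up.
 */
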
/* Expects to have event_mutex held when called */
static int
create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
{
	struct dentry *d_events;
	struct dentry *entry;

	entry = debugfs_create_file("set_event", 0644, parent,
				    tr, &ftrace_set_event_fops);
	if (!entry) {
		pr_warning("Could not create debugfs 'set_event' entry\n");
		return -ENOMEM;
	}

	d_events = debugfs_create_dir("events", parent);
	if (!d_events) {
		pr_warning("Could not create debugfs 'events' directory\n");
		return -ENOMEM;
	}

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  tr, &ftrace_tr_enable_fops);

	tr->event_dir = d_events;

	return 0;
}

2217 /**
2218  * event_trace_add_tracer - add an instance of a trace_array to events
2219 * @parent: The parent dentry to place the files/directories for events in
2220 * @tr: The trace array associated with these events
2221 *
2222 * When a new instance is created, it needs to set up its events
2223 * directory, as well as other files associated with events. It also
2224  * creates the event hierarchy in the @parent/events directory.
2225 *
2226 * Returns 0 on success.
2227 */
2228 int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
2229 {
2230 int ret;
2231
2232 mutex_lock(&event_mutex);
2233
2234 ret = create_event_toplevel_files(parent, tr);
2235 if (ret)
2236 goto out_unlock;
2237
2238 down_write(&trace_event_mutex);
2239 __trace_add_event_dirs(tr);
2240 up_write(&trace_event_mutex);
2241
2242 out_unlock:
2243 mutex_unlock(&event_mutex);
2244
2245 return ret;
2246 }
2247
2248 /*
2249  * The top trace array has already had its file descriptors created.
2250 * Now the files themselves need to be created.
2251 */
2252 static __init int
2253 early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
2254 {
2255 int ret;
2256
2257 mutex_lock(&event_mutex);
2258
2259 ret = create_event_toplevel_files(parent, tr);
2260 if (ret)
2261 goto out_unlock;
2262
2263 down_write(&trace_event_mutex);
2264 __trace_early_add_event_dirs(tr);
2265 up_write(&trace_event_mutex);
2266
2267 out_unlock:
2268 mutex_unlock(&event_mutex);
2269
2270 return ret;
2271 }
2272
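/*
 * The inverse of event_trace_add_tracer(): disable anything that is
 * still running, remove the per-event directories, and then remove
 * the instance's top level "events" directory itself.
 */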
2273 int event_trace_del_tracer(struct trace_array *tr)
2274 {
2275 /* Disable any running events */
2276 __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
2277
2278 mutex_lock(&event_mutex);
2279
2280 down_write(&trace_event_mutex);
2281 __trace_remove_event_dirs(tr);
2282 debugfs_remove_recursive(tr->event_dir);
2283 up_write(&trace_event_mutex);
2284
2285 tr->event_dir = NULL;
2286
2287 mutex_unlock(&event_mutex);
2288
2289 return 0;
2290 }
2291
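/*
 * Allocate the slab caches that back the event field and file
 * descriptors. SLAB_PANIC: there is no point continuing to boot the
 * tracer without them.
 */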
2292 static __init int event_trace_memsetup(void)
2293 {
2294 field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
2295 file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC);
2296 return 0;
2297 }
2298
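/*
 * Register every built-in event found in the linker section, attach
 * them all to the top level trace array, and then enable whatever was
 * requested with the trace_event= boot parameter.
 */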
2299 static __init int event_trace_enable(void)
2300 {
2301 struct trace_array *tr = top_trace_array();
2302 struct ftrace_event_call **iter, *call;
2303 char *buf = bootup_event_buf;
2304 char *token;
2305 int ret;
2306
2307 for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
2308
2309 call = *iter;
2310 ret = event_init(call);
2311 if (!ret)
2312 list_add(&call->list, &ftrace_events);
2313 }
2314
2315 /*
2316 * We need the top trace array to have a working set of trace
2317 * points at early init, before the debug files and directories
2318 * are created. Create the file entries now, and attach them
2319 * to the actual file dentries later.
2320 */
2321 __trace_early_add_events(tr);
2322
2323 while (true) {
2324 token = strsep(&buf, ",");
2325
2326 if (!token)
2327 break;
2328 if (!*token)
2329 continue;
2330
2331 ret = ftrace_set_clr_event(tr, token, 1);
2332 if (ret)
2333 pr_warn("Failed to enable trace event: %s\n", token);
2334 }
2335
2336 trace_printk_start_comm();
2337
2338 register_event_cmds();
2339
2340 return 0;
2341 }
2342
2343 static __init int event_trace_init(void)
2344 {
2345 struct trace_array *tr;
2346 struct dentry *d_tracer;
2347 struct dentry *entry;
2348 int ret;
2349
2350 tr = top_trace_array();
2351
2352 d_tracer = tracing_init_dentry();
2353 if (!d_tracer)
2354 return 0;
2355
2356 entry = debugfs_create_file("available_events", 0444, d_tracer,
2357 tr, &ftrace_avail_fops);
2358 if (!entry)
2359 		pr_warning("Could not create debugfs 'available_events' entry\n");
2361
2362 if (trace_define_common_fields())
2363 		pr_warning("tracing: Failed to allocate common fields\n");
2364
2365 ret = early_event_add_tracer(d_tracer, tr);
2366 if (ret)
2367 return ret;
2368
2369 ret = register_module_notifier(&trace_module_nb);
2370 if (ret)
2371 pr_warning("Failed to register trace events module notifier\n");
2372
2373 return 0;
2374 }
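/*
 * Three stage init: the caches must exist before any event can be
 * registered (early_initcall), events must be usable before the
 * filesystem is up (core_initcall), and the debugfs entries can only
 * be created once the filesystem exists (fs_initcall).
 */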
2375 early_initcall(event_trace_memsetup);
2376 core_initcall(event_trace_enable);
2377 fs_initcall(event_trace_init);
2378
2379 #ifdef CONFIG_FTRACE_STARTUP_TEST
2380
2381 static DEFINE_SPINLOCK(test_spinlock);
2382 static DEFINE_SPINLOCK(test_spinlock_irq);
2383 static DEFINE_MUTEX(test_mutex);
2384
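/*
 * Grab a few locks and sleep a little so that locking, irq and
 * scheduler trace points have something to fire on while an event is
 * enabled during the self test.
 */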
2385 static __init void test_work(struct work_struct *dummy)
2386 {
2387 spin_lock(&test_spinlock);
2388 spin_lock_irq(&test_spinlock_irq);
2389 udelay(1);
2390 spin_unlock_irq(&test_spinlock_irq);
2391 spin_unlock(&test_spinlock);
2392
2393 mutex_lock(&test_mutex);
2394 msleep(1);
2395 mutex_unlock(&test_mutex);
2396 }
2397
2398 static __init int event_test_thread(void *unused)
2399 {
2400 void *test_malloc;
2401
2402 test_malloc = kmalloc(1234, GFP_KERNEL);
2403 if (!test_malloc)
2404 pr_info("failed to kmalloc\n");
2405
2406 schedule_on_each_cpu(test_work);
2407
2408 kfree(test_malloc);
2409
2410 set_current_state(TASK_INTERRUPTIBLE);
2411 while (!kthread_should_stop())
2412 schedule();
2413
2414 return 0;
2415 }
2416
2417 /*
2418 * Do various things that may trigger events.
2419 */
2420 static __init void event_test_stuff(void)
2421 {
2422 struct task_struct *test_thread;
2423
2424 test_thread = kthread_run(event_test_thread, NULL, "test-events");
2425 msleep(1);
2426 kthread_stop(test_thread);
2427 }
2428
2429 /*
2430 * For every trace event defined, we will test each trace point separately,
2431 * and then by groups, and finally all trace points.
2432 */
2433 static __init void event_trace_self_tests(void)
2434 {
2435 struct ftrace_subsystem_dir *dir;
2436 struct ftrace_event_file *file;
2437 struct ftrace_event_call *call;
2438 struct event_subsystem *system;
2439 struct trace_array *tr;
2440 int ret;
2441
2442 tr = top_trace_array();
2443
2444 pr_info("Running tests on trace events:\n");
2445
2446 list_for_each_entry(file, &tr->events, list) {
2447
2448 call = file->event_call;
2449
2450 /* Only test those that have a probe */
2451 if (!call->class || !call->class->probe)
2452 continue;
2453
2454 /*
2455 * Testing syscall events here is pretty useless, but
2456 		 * we still do it if configured, even though it is time consuming.
2457 * What we really need is a user thread to perform the
2458 * syscalls as we test.
2459 */
2460 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
2461 if (call->class->system &&
2462 strcmp(call->class->system, "syscalls") == 0)
2463 continue;
2464 #endif
2465
2466 pr_info("Testing event %s: ", call->name);
2467
2468 /*
2469 * If an event is already enabled, someone is using
2470 * it and the self test should not be on.
2471 */
2472 if (file->flags & FTRACE_EVENT_FL_ENABLED) {
2473 pr_warning("Enabled event during self test!\n");
2474 WARN_ON_ONCE(1);
2475 continue;
2476 }
2477
2478 ftrace_event_enable_disable(file, 1);
2479 event_test_stuff();
2480 ftrace_event_enable_disable(file, 0);
2481
2482 pr_cont("OK\n");
2483 }
2484
2485 /* Now test at the sub system level */
2486
2487 pr_info("Running tests on trace event systems:\n");
2488
2489 list_for_each_entry(dir, &tr->systems, list) {
2490
2491 system = dir->subsystem;
2492
2493 /* the ftrace system is special, skip it */
2494 if (strcmp(system->name, "ftrace") == 0)
2495 continue;
2496
2497 pr_info("Testing event system %s: ", system->name);
2498
2499 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
2500 if (WARN_ON_ONCE(ret)) {
2501 pr_warning("error enabling system %s\n",
2502 system->name);
2503 continue;
2504 }
2505
2506 event_test_stuff();
2507
2508 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
2509 if (WARN_ON_ONCE(ret)) {
2510 pr_warning("error disabling system %s\n",
2511 system->name);
2512 continue;
2513 }
2514
2515 pr_cont("OK\n");
2516 }
2517
2518 /* Test with all events enabled */
2519
2520 pr_info("Running tests on all trace events:\n");
2521 pr_info("Testing all events: ");
2522
2523 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
2524 if (WARN_ON_ONCE(ret)) {
2525 pr_warning("error enabling all events\n");
2526 return;
2527 }
2528
2529 event_test_stuff();
2530
2531 	/* Disable all events again */
2532 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
2533 if (WARN_ON_ONCE(ret)) {
2534 pr_warning("error disabling all events\n");
2535 return;
2536 }
2537
2538 pr_cont("OK\n");
2539 }
2540
2541 #ifdef CONFIG_FUNCTION_TRACER
2542
2543 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
2544
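/*
 * Callback attached to the function tracer while the self tests are
 * re-run. It records a TRACE_FN entry by hand; the per-cpu counter
 * prevents recursion if the ring buffer write path itself gets
 * traced, which is why the ops below is marked RECURSION_SAFE.
 */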
2545 static void
2546 function_test_events_call(unsigned long ip, unsigned long parent_ip,
2547 struct ftrace_ops *op, struct pt_regs *pt_regs)
2548 {
2549 struct ring_buffer_event *event;
2550 struct ring_buffer *buffer;
2551 struct ftrace_entry *entry;
2552 unsigned long flags;
2553 long disabled;
2554 int cpu;
2555 int pc;
2556
2557 pc = preempt_count();
2558 preempt_disable_notrace();
2559 cpu = raw_smp_processor_id();
2560 disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
2561
2562 if (disabled != 1)
2563 goto out;
2564
2565 local_save_flags(flags);
2566
2567 event = trace_current_buffer_lock_reserve(&buffer,
2568 TRACE_FN, sizeof(*entry),
2569 flags, pc);
2570 if (!event)
2571 goto out;
2572 entry = ring_buffer_event_data(event);
2573 entry->ip = ip;
2574 entry->parent_ip = parent_ip;
2575
2576 trace_buffer_unlock_commit(buffer, event, flags, pc);
2577
2578 out:
2579 atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
2580 preempt_enable_notrace();
2581 }
2582
2583 static struct ftrace_ops trace_ops __initdata = {
2585 .func = function_test_events_call,
2586 .flags = FTRACE_OPS_FL_RECURSION_SAFE,
2587 };
2588
2589 static __init void event_trace_self_test_with_function(void)
2590 {
2591 	int ret;

2592 	ret = register_ftrace_function(&trace_ops);
2593 if (WARN_ON(ret < 0)) {
2594 pr_info("Failed to enable function tracer for event tests\n");
2595 return;
2596 }
2597 pr_info("Running tests again, along with the function tracer\n");
2598 event_trace_self_tests();
2599 unregister_ftrace_function(&trace_ops);
2600 }
2601 #else
2602 static __init void event_trace_self_test_with_function(void)
2603 {
2604 }
2605 #endif
2606
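/*
 * Run at late_initcall time. Skipped when "trace_event=" was given on
 * the command line, since setup_trace_event() sets
 * tracing_selftest_disabled.
 */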
2607 static __init int event_trace_self_tests_init(void)
2608 {
2609 if (!tracing_selftest_disabled) {
2610 event_trace_self_tests();
2611 event_trace_self_test_with_function();
2612 }
2613
2614 return 0;
2615 }
2616
2617 late_initcall(event_trace_self_tests_init);
2618
2619 #endif