/*
 * kernel/trace/trace_events.c - event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

DEFINE_MUTEX(event_storage_mutex);
EXPORT_SYMBOL_GPL(event_storage_mutex);

char event_storage[EVENT_STORAGE_SIZE];
EXPORT_SYMBOL_GPL(event_storage);

LIST_HEAD(ftrace_events);
LIST_HEAD(ftrace_common_fields);

struct list_head *
trace_get_fields(struct ftrace_event_call *event_call)
{
        if (!event_call->class->get_fields)
                return &event_call->class->fields;
        return event_call->class->get_fields(event_call);
}

static int __trace_define_field(struct list_head *head, const char *type,
                                const char *name, int offset, int size,
                                int is_signed, int filter_type)
{
        struct ftrace_event_field *field;

        field = kzalloc(sizeof(*field), GFP_KERNEL);
        if (!field)
                goto err;

        field->name = kstrdup(name, GFP_KERNEL);
        if (!field->name)
                goto err;

        field->type = kstrdup(type, GFP_KERNEL);
        if (!field->type)
                goto err;

        if (filter_type == FILTER_OTHER)
                field->filter_type = filter_assign_type(type);
        else
                field->filter_type = filter_type;

        field->offset = offset;
        field->size = size;
        field->is_signed = is_signed;

        list_add(&field->link, head);

        return 0;

err:
        if (field)
                kfree(field->name);
        kfree(field);

        return -ENOMEM;
}

int trace_define_field(struct ftrace_event_call *call, const char *type,
                       const char *name, int offset, int size, int is_signed,
                       int filter_type)
{
        struct list_head *head;

        if (WARN_ON(!call->class))
                return 0;

        head = trace_get_fields(call);
        return __trace_define_field(head, type, name, offset, size,
                                    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);
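
/*
 * Illustrative sketch (not from this file): a ->define_fields() callback
 * could describe an "int pid" member of its entry structure with:
 *
 *      trace_define_field(call, "int", "pid",
 *                         offsetof(struct my_entry, pid),
 *                         sizeof(int), is_signed_type(int), FILTER_OTHER);
 *
 * "struct my_entry" is a hypothetical entry layout used only for the
 * example; real callers pass their own.
 */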

#define __common_field(type, item)                                      \
        ret = __trace_define_field(&ftrace_common_fields, #type,        \
                                   "common_" #item,                     \
                                   offsetof(typeof(ent), item),         \
                                   sizeof(ent.item),                    \
                                   is_signed_type(type), FILTER_OTHER); \
        if (ret)                                                        \
                return ret;

static int trace_define_common_fields(void)
{
        int ret;
        struct trace_entry ent;

        __common_field(unsigned short, type);
        __common_field(unsigned char, flags);
        __common_field(unsigned char, preempt_count);
        __common_field(int, pid);

        return ret;
}
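
/*
 * The common fields defined above mirror struct trace_entry and appear
 * at the top of every event's "format" file as "common_*" entries, e.g.
 * (offsets and sizes are illustrative and depend on the kernel config):
 *
 *      field:unsigned short common_type;  offset:0;  size:2;  signed:0;
 *      field:int common_pid;              offset:4;  size:4;  signed:1;
 */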

void trace_destroy_fields(struct ftrace_event_call *call)
{
        struct ftrace_event_field *field, *next;
        struct list_head *head;

        head = trace_get_fields(call);
        list_for_each_entry_safe(field, next, head, link) {
                list_del(&field->link);
                kfree(field->type);
                kfree(field->name);
                kfree(field);
        }
}

int trace_event_raw_init(struct ftrace_event_call *call)
{
        int id;

        id = register_ftrace_event(&call->event);
        if (!id)
                return -ENODEV;

        return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

int ftrace_event_reg(struct ftrace_event_call *call,
                     enum trace_reg type, void *data)
{
        switch (type) {
        case TRACE_REG_REGISTER:
                return tracepoint_probe_register(call->name,
                                                 call->class->probe,
                                                 call);
        case TRACE_REG_UNREGISTER:
                tracepoint_probe_unregister(call->name,
                                            call->class->probe,
                                            call);
                return 0;

#ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
                return tracepoint_probe_register(call->name,
                                                 call->class->perf_probe,
                                                 call);
        case TRACE_REG_PERF_UNREGISTER:
                tracepoint_probe_unregister(call->name,
                                            call->class->perf_probe,
                                            call);
                return 0;
        case TRACE_REG_PERF_OPEN:
        case TRACE_REG_PERF_CLOSE:
        case TRACE_REG_PERF_ADD:
        case TRACE_REG_PERF_DEL:
                return 0;
#endif
        }
        return 0;
}
EXPORT_SYMBOL_GPL(ftrace_event_reg);
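
/*
 * ftrace_event_reg() is the default ->reg() callback: it maps the
 * TRACE_REG_* requests onto tracepoint probe (un)registration. An event
 * class would typically be wired up roughly as follows (illustrative
 * sketch; "my_probe" is hypothetical):
 *
 *      struct ftrace_event_class my_class = {
 *              .reg    = ftrace_event_reg,
 *              .probe  = my_probe,
 *      };
 */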

void trace_event_enable_cmd_record(bool enable)
{
        struct ftrace_event_call *call;

        mutex_lock(&event_mutex);
        list_for_each_entry(call, &ftrace_events, list) {
                if (!(call->flags & TRACE_EVENT_FL_ENABLED))
                        continue;

                if (enable) {
                        tracing_start_cmdline_record();
                        call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
                } else {
                        tracing_stop_cmdline_record();
                        call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
                }
        }
        mutex_unlock(&event_mutex);
}

static int ftrace_event_enable_disable(struct ftrace_event_call *call,
                                       int enable)
{
        int ret = 0;

        switch (enable) {
        case 0:
                if (call->flags & TRACE_EVENT_FL_ENABLED) {
                        call->flags &= ~TRACE_EVENT_FL_ENABLED;
                        if (call->flags & TRACE_EVENT_FL_RECORDED_CMD) {
                                tracing_stop_cmdline_record();
                                call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
                        }
                        call->class->reg(call, TRACE_REG_UNREGISTER, NULL);
                }
                break;
        case 1:
                if (!(call->flags & TRACE_EVENT_FL_ENABLED)) {
                        if (trace_flags & TRACE_ITER_RECORD_CMD) {
                                tracing_start_cmdline_record();
                                call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
                        }
                        ret = call->class->reg(call, TRACE_REG_REGISTER, NULL);
                        if (ret) {
                                tracing_stop_cmdline_record();
                                pr_info("event trace: Could not enable event %s\n",
                                        call->name);
                                break;
                        }
                        call->flags |= TRACE_EVENT_FL_ENABLED;
                }
                break;
        }

        return ret;
}

static void ftrace_clear_events(void)
{
        struct ftrace_event_call *call;

        mutex_lock(&event_mutex);
        list_for_each_entry(call, &ftrace_events, list) {
                ftrace_event_enable_disable(call, 0);
        }
        mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
        struct event_filter *filter = system->filter;

        WARN_ON_ONCE(system->ref_count == 0);
        if (--system->ref_count)
                return;

        if (filter) {
                kfree(filter->filter_string);
                kfree(filter);
        }
        kfree(system->name);
        kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
        WARN_ON_ONCE(system->ref_count == 0);
        system->ref_count++;
}

static void put_system(struct event_subsystem *system)
{
        mutex_lock(&event_mutex);
        __put_system(system);
        mutex_unlock(&event_mutex);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
                                  const char *event, int set)
{
        struct ftrace_event_call *call;
        int ret = -EINVAL;

        mutex_lock(&event_mutex);
        list_for_each_entry(call, &ftrace_events, list) {

                if (!call->name || !call->class || !call->class->reg)
                        continue;

                if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
                        continue;

                if (match &&
                    strcmp(match, call->name) != 0 &&
                    strcmp(match, call->class->system) != 0)
                        continue;

                if (sub && strcmp(sub, call->class->system) != 0)
                        continue;

                if (event && strcmp(event, call->name) != 0)
                        continue;

                ftrace_event_enable_disable(call, set);

                ret = 0;
        }
        mutex_unlock(&event_mutex);

        return ret;
}

static int ftrace_set_clr_event(char *buf, int set)
{
        char *event = NULL, *sub = NULL, *match;

        /*
         * The buf format can be <subsystem>:<event-name>
         *  *:<event-name> means any event by that name.
         *  :<event-name> is the same.
         *
         *  <subsystem>:* means all events in that subsystem
         *  <subsystem>: means the same.
         *
         *  <name> (no ':') means all events in a subsystem with
         *  the name <name> or any event that matches <name>
         */

        match = strsep(&buf, ":");
        if (buf) {
                sub = match;
                event = buf;
                match = NULL;

                if (!strlen(sub) || strcmp(sub, "*") == 0)
                        sub = NULL;
                if (!strlen(event) || strcmp(event, "*") == 0)
                        event = NULL;
        }

        return __ftrace_set_clr_event(match, sub, event, set);
}
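
/*
 * Writes to the "set_event" file are parsed by ftrace_event_write()
 * below and end up here, so from user space one might do, for example:
 *
 *      echo sched:sched_switch > /sys/kernel/debug/tracing/set_event
 *      echo 'irq:*' >> set_event
 *      echo '!sched:sched_switch' >> set_event
 *
 * where a leading '!' requests disabling instead of enabling.
 */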

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
        return __ftrace_set_clr_event(NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);
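
/*
 * For example, another part of the kernel could enable every event in
 * the "sched" subsystem with:
 *
 *      trace_set_clr_event("sched", NULL, 1);
 *
 * and later disable a single one of them with:
 *
 *      trace_set_clr_event("sched", "sched_switch", 0);
 */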

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE 127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos)
{
        struct trace_parser parser;
        ssize_t read, ret;

        if (!cnt)
                return 0;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
                return -ENOMEM;

        read = trace_get_user(&parser, ubuf, cnt, ppos);

        if (read >= 0 && trace_parser_loaded(&parser)) {
                int set = 1;

                if (*parser.buffer == '!')
                        set = 0;

                parser.buffer[parser.idx] = 0;

                ret = ftrace_set_clr_event(parser.buffer + !set, set);
                if (ret)
                        goto out_put;
        }

        ret = read;

 out_put:
        trace_parser_put(&parser);

        return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_event_call *call = v;

        (*pos)++;

        list_for_each_entry_continue(call, &ftrace_events, list) {
                /*
                 * The ftrace subsystem is for showing formats only.
                 * They cannot be enabled or disabled via the event files.
                 */
                if (call->class && call->class->reg)
                        return call;
        }

        return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_event_call *call;
        loff_t l;

        mutex_lock(&event_mutex);

        call = list_entry(&ftrace_events, struct ftrace_event_call, list);
        for (l = 0; l <= *pos; ) {
                call = t_next(m, call, &l);
                if (!call)
                        break;
        }
        return call;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_event_call *call = v;

        (*pos)++;

        list_for_each_entry_continue(call, &ftrace_events, list) {
                if (call->flags & TRACE_EVENT_FL_ENABLED)
                        return call;
        }

        return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_event_call *call;
        loff_t l;

        mutex_lock(&event_mutex);

        call = list_entry(&ftrace_events, struct ftrace_event_call, list);
        for (l = 0; l <= *pos; ) {
                call = s_next(m, call, &l);
                if (!call)
                        break;
        }
        return call;
}

static int t_show(struct seq_file *m, void *v)
{
        struct ftrace_event_call *call = v;

        if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
                seq_printf(m, "%s:", call->class->system);
        seq_printf(m, "%s\n", call->name);

        return 0;
}
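
/*
 * t_show() emits one line per event, so "available_events" (and
 * "set_event" when read) contain entries such as:
 *
 *      sched:sched_switch
 *      irq:irq_handler_entry
 */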

static void t_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&event_mutex);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        char *buf;

        if (call->flags & TRACE_EVENT_FL_ENABLED)
                buf = "1\n";
        else
                buf = "0\n";

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        switch (val) {
        case 0:
        case 1:
                mutex_lock(&event_mutex);
                ret = ftrace_event_enable_disable(call, val);
                mutex_unlock(&event_mutex);
                break;

        default:
                return -EINVAL;
        }

        *ppos += cnt;

        return ret ? ret : cnt;
}

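/*
 * system_enable_read() summarizes the state of a subsystem's events in a
 * single character: '0' if all are disabled, '1' if all are enabled,
 * 'X' for a mixture, and '?' if the subsystem has no events at all.
 */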
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        const char set_to_char[4] = { '?', '0', '1', 'X' };
        struct event_subsystem *system = filp->private_data;
        struct ftrace_event_call *call;
        char buf[2];
        int set = 0;
        int ret;

        mutex_lock(&event_mutex);
        list_for_each_entry(call, &ftrace_events, list) {
                if (!call->name || !call->class || !call->class->reg)
                        continue;

                if (system && strcmp(call->class->system, system->name) != 0)
                        continue;

                /*
                 * We need to find out if all the events are set,
                 * if all the events are cleared, or if we have
                 * a mixture.
                 */
                set |= (1 << !!(call->flags & TRACE_EVENT_FL_ENABLED));

                /*
                 * If we have a mixture, no need to look further.
                 */
                if (set == 3)
                        break;
        }
        mutex_unlock(&event_mutex);

        buf[0] = set_to_char[set];
        buf[1] = '\n';

        ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

        return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
                    loff_t *ppos)
{
        struct event_subsystem *system = filp->private_data;
        const char *name = NULL;
        unsigned long val;
        ssize_t ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        if (val != 0 && val != 1)
                return -EINVAL;

        /*
         * Opening of "enable" adds a ref count to system,
         * so the name is safe to use.
         */
        if (system)
                name = system->name;

        ret = __ftrace_set_clr_event(NULL, name, NULL, val);
        if (ret)
                goto out;

        ret = cnt;

out:
        *ppos += cnt;

        return ret;
}

enum {
        FORMAT_HEADER           = 1,
        FORMAT_FIELD_SEPARATOR  = 2,
        FORMAT_PRINTFMT         = 3,
};
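
/*
 * The f_*() seq_file iterators below walk an event's "format" file in
 * order: FORMAT_HEADER, the common fields, FORMAT_FIELD_SEPARATOR, the
 * event's own fields, and finally FORMAT_PRINTFMT.
 */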

static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_event_call *call = m->private;
        struct ftrace_event_field *field;
        struct list_head *common_head = &ftrace_common_fields;
        struct list_head *head = trace_get_fields(call);

        (*pos)++;

        switch ((unsigned long)v) {
        case FORMAT_HEADER:
                if (unlikely(list_empty(common_head)))
                        return NULL;

                field = list_entry(common_head->prev,
                                   struct ftrace_event_field, link);
                return field;

        case FORMAT_FIELD_SEPARATOR:
                if (unlikely(list_empty(head)))
                        return NULL;

                field = list_entry(head->prev, struct ftrace_event_field, link);
                return field;

        case FORMAT_PRINTFMT:
                /* all done */
                return NULL;
        }

        field = v;
        if (field->link.prev == common_head)
                return (void *)FORMAT_FIELD_SEPARATOR;
        else if (field->link.prev == head)
                return (void *)FORMAT_PRINTFMT;

        field = list_entry(field->link.prev, struct ftrace_event_field, link);

        return field;
}

static void *f_start(struct seq_file *m, loff_t *pos)
{
        loff_t l = 0;
        void *p;

        /* Start by showing the header */
        if (!*pos)
                return (void *)FORMAT_HEADER;

        p = (void *)FORMAT_HEADER;
        do {
                p = f_next(m, p, &l);
        } while (p && l < *pos);

        return p;
}

static int f_show(struct seq_file *m, void *v)
{
        struct ftrace_event_call *call = m->private;
        struct ftrace_event_field *field;
        const char *array_descriptor;

        switch ((unsigned long)v) {
        case FORMAT_HEADER:
                seq_printf(m, "name: %s\n", call->name);
                seq_printf(m, "ID: %d\n", call->event.type);
                seq_printf(m, "format:\n");
                return 0;

        case FORMAT_FIELD_SEPARATOR:
                seq_putc(m, '\n');
                return 0;

        case FORMAT_PRINTFMT:
                seq_printf(m, "\nprint fmt: %s\n",
                           call->print_fmt);
                return 0;
        }

        field = v;

        /*
         * Smartly shows the array type (except dynamic arrays).
         * Normal:
         *      field:TYPE VAR
         * If TYPE := TYPE[LEN], it is shown:
         *      field:TYPE VAR[LEN]
         */
        array_descriptor = strchr(field->type, '[');

        if (!strncmp(field->type, "__data_loc", 10))
                array_descriptor = NULL;

        if (!array_descriptor)
                seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
                           field->type, field->name, field->offset,
                           field->size, !!field->is_signed);
        else
                seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
                           (int)(array_descriptor - field->type),
                           field->type, field->name,
                           array_descriptor, field->offset,
                           field->size, !!field->is_signed);

        return 0;
}
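
/*
 * For instance, the two branches above would render a plain field and
 * an array field as (values are illustrative):
 *
 *      field:int pid;  offset:12;  size:4;  signed:1;
 *      field:char comm[16];  offset:16;  size:16;  signed:1;
 */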

static void f_stop(struct seq_file *m, void *p)
{
}

static const struct seq_operations trace_format_seq_ops = {
        .start          = f_start,
        .next           = f_next,
        .stop           = f_stop,
        .show           = f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
        struct ftrace_event_call *call = inode->i_private;
        struct seq_file *m;
        int ret;

        ret = seq_open(file, &trace_format_seq_ops);
        if (ret < 0)
                return ret;

        m = file->private_data;
        m->private = call;

        return 0;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);
        trace_seq_printf(s, "%d\n", call->event.type);

        r = simple_read_from_buffer(ubuf, cnt, ppos,
                                    s->buffer, s->len);
        kfree(s);
        return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        print_event_filter(call, s);
        r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

        kfree(s);

        return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        char *buf;
        int err;

        if (cnt >= PAGE_SIZE)
                return -EINVAL;

        buf = (char *)__get_free_page(GFP_TEMPORARY);
        if (!buf)
                return -ENOMEM;

        if (copy_from_user(buf, ubuf, cnt)) {
                free_page((unsigned long) buf);
                return -EFAULT;
        }
        buf[cnt] = '\0';

        err = apply_event_filter(call, buf);
        free_page((unsigned long) buf);
        if (err < 0)
                return err;

        *ppos += cnt;

        return cnt;
}

static LIST_HEAD(event_subsystems);

static int subsystem_open(struct inode *inode, struct file *filp)
{
        struct event_subsystem *system = NULL;
        int ret;

        if (!inode->i_private)
                goto skip_search;

        /* Make sure the system still exists */
        mutex_lock(&event_mutex);
        list_for_each_entry(system, &event_subsystems, list) {
                if (system == inode->i_private) {
                        /* Don't open systems with no events */
                        if (!system->nr_events) {
                                system = NULL;
                                break;
                        }
                        __get_system(system);
                        break;
                }
        }
        mutex_unlock(&event_mutex);

        if (system != inode->i_private)
                return -ENODEV;

 skip_search:
        ret = tracing_open_generic(inode, filp);
        if (ret < 0 && system)
                put_system(system);

        return ret;
}

static int subsystem_release(struct inode *inode, struct file *file)
{
        struct event_subsystem *system = inode->i_private;

        if (system)
                put_system(system);

        return 0;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                      loff_t *ppos)
{
        struct event_subsystem *system = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        print_subsystem_event_filter(system, s);
        r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

        kfree(s);

        return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                       loff_t *ppos)
{
        struct event_subsystem *system = filp->private_data;
        char *buf;
        int err;

        if (cnt >= PAGE_SIZE)
                return -EINVAL;

        buf = (char *)__get_free_page(GFP_TEMPORARY);
        if (!buf)
                return -ENOMEM;

        if (copy_from_user(buf, ubuf, cnt)) {
                free_page((unsigned long) buf);
                return -EFAULT;
        }
        buf[cnt] = '\0';

        err = apply_subsystem_event_filter(system, buf);
        free_page((unsigned long) buf);
        if (err < 0)
                return err;

        *ppos += cnt;

        return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
        int (*func)(struct trace_seq *s) = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        func(s);
        r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

        kfree(s);

        return r;
}

static int ftrace_event_avail_open(struct inode *inode, struct file *file);
static int ftrace_event_set_open(struct inode *inode, struct file *file);

static const struct seq_operations show_event_seq_ops = {
        .start = t_start,
        .next = t_next,
        .show = t_show,
        .stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
        .start = s_start,
        .next = s_next,
        .show = t_show,
        .stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
        .open = ftrace_event_avail_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
        .open = ftrace_event_set_open,
        .read = seq_read,
        .write = ftrace_event_write,
        .llseek = seq_lseek,
        .release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
        .open = tracing_open_generic,
        .read = event_enable_read,
        .write = event_enable_write,
        .llseek = default_llseek,
};

static const struct file_operations ftrace_event_format_fops = {
        .open = trace_format_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

static const struct file_operations ftrace_event_id_fops = {
        .open = tracing_open_generic,
        .read = event_id_read,
        .llseek = default_llseek,
};

static const struct file_operations ftrace_event_filter_fops = {
        .open = tracing_open_generic,
        .read = event_filter_read,
        .write = event_filter_write,
        .llseek = default_llseek,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
        .open = subsystem_open,
        .read = subsystem_filter_read,
        .write = subsystem_filter_write,
        .llseek = default_llseek,
        .release = subsystem_release,
};

static const struct file_operations ftrace_system_enable_fops = {
        .open = subsystem_open,
        .read = system_enable_read,
        .write = system_enable_write,
        .llseek = default_llseek,
        .release = subsystem_release,
};

static const struct file_operations ftrace_show_header_fops = {
        .open = tracing_open_generic,
        .read = show_header,
        .llseek = default_llseek,
};

static struct dentry *event_trace_events_dir(void)
{
        static struct dentry *d_tracer;
        static struct dentry *d_events;

        if (d_events)
                return d_events;

        d_tracer = tracing_init_dentry();
        if (!d_tracer)
                return NULL;

        d_events = debugfs_create_dir("events", d_tracer);
        if (!d_events)
                pr_warning("Could not create debugfs 'events' directory\n");

        return d_events;
}

static int
ftrace_event_avail_open(struct inode *inode, struct file *file)
{
        const struct seq_operations *seq_ops = &show_event_seq_ops;

        return seq_open(file, seq_ops);
}

static int
ftrace_event_set_open(struct inode *inode, struct file *file)
{
        const struct seq_operations *seq_ops = &show_set_event_seq_ops;

        if ((file->f_mode & FMODE_WRITE) &&
            (file->f_flags & O_TRUNC))
                ftrace_clear_events();

        return seq_open(file, seq_ops);
}

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
        struct event_subsystem *system;
        struct dentry *entry;

        /* First see if we already created this dir */
        list_for_each_entry(system, &event_subsystems, list) {
                if (strcmp(system->name, name) == 0) {
                        system->nr_events++;
                        return system->entry;
                }
        }

        /* need to create new entry */
        system = kmalloc(sizeof(*system), GFP_KERNEL);
        if (!system) {
                pr_warning("No memory to create event subsystem %s\n",
                           name);
                return d_events;
        }

        system->entry = debugfs_create_dir(name, d_events);
        if (!system->entry) {
                pr_warning("Could not create event subsystem %s\n",
                           name);
                kfree(system);
                return d_events;
        }

        system->nr_events = 1;
        system->ref_count = 1;
        system->name = kstrdup(name, GFP_KERNEL);
        if (!system->name) {
                debugfs_remove(system->entry);
                kfree(system);
                return d_events;
        }

        list_add(&system->list, &event_subsystems);

        system->filter = NULL;

        system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
        if (!system->filter) {
                pr_warning("Could not allocate filter for subsystem '%s'\n",
                           name);
                return system->entry;
        }

        entry = debugfs_create_file("filter", 0644, system->entry, system,
                                    &ftrace_subsystem_filter_fops);
        if (!entry) {
                kfree(system->filter);
                system->filter = NULL;
                pr_warning("Could not create debugfs '%s/filter' entry\n",
                           name);
        }

        trace_create_file("enable", 0644, system->entry, system,
                          &ftrace_system_enable_fops);

        return system->entry;
}
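
/*
 * A subsystem directory created above thus provides, under debugfs:
 *
 *      tracing/events/<subsystem>/filter
 *      tracing/events/<subsystem>/enable
 */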

static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
                 const struct file_operations *id,
                 const struct file_operations *enable,
                 const struct file_operations *filter,
                 const struct file_operations *format)
{
        struct list_head *head;
        int ret;

        /*
         * If the trace point header did not define TRACE_SYSTEM
         * then the system would be called "TRACE_SYSTEM".
         */
        if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
                d_events = event_subsystem_dir(call->class->system, d_events);

        call->dir = debugfs_create_dir(call->name, d_events);
        if (!call->dir) {
                pr_warning("Could not create debugfs '%s' directory\n",
                           call->name);
                return -1;
        }

        if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
                trace_create_file("enable", 0644, call->dir, call,
                                  enable);

#ifdef CONFIG_PERF_EVENTS
        if (call->event.type && call->class->reg)
                trace_create_file("id", 0444, call->dir, call,
                                  id);
#endif

        /*
         * Other events may have the same class. Only update
         * the fields if they are not already defined.
         */
        head = trace_get_fields(call);
        if (list_empty(head)) {
                ret = call->class->define_fields(call);
                if (ret < 0) {
                        pr_warning("Could not initialize trace point events/%s\n",
                                   call->name);
                        return ret;
                }
        }
        trace_create_file("filter", 0644, call->dir, call,
                          filter);

        trace_create_file("format", 0444, call->dir, call,
                          format);

        return 0;
}
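
/*
 * Each event directory created by event_create_dir() contains up to four
 * files: "enable", "id" (only with CONFIG_PERF_EVENTS), "filter" and
 * "format".
 */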

static void event_remove(struct ftrace_event_call *call)
{
        ftrace_event_enable_disable(call, 0);
        if (call->event.funcs)
                __unregister_ftrace_event(&call->event);
        list_del(&call->list);
}

static int event_init(struct ftrace_event_call *call)
{
        int ret = 0;

        if (WARN_ON(!call->name))
                return -EINVAL;

        if (call->class->raw_init) {
                ret = call->class->raw_init(call);
                if (ret < 0 && ret != -ENOSYS)
                        pr_warn("Could not initialize trace events/%s\n",
                                call->name);
        }

        return ret;
}

static int
__trace_add_event_call(struct ftrace_event_call *call, struct module *mod,
                       const struct file_operations *id,
                       const struct file_operations *enable,
                       const struct file_operations *filter,
                       const struct file_operations *format)
{
        struct dentry *d_events;
        int ret;

        ret = event_init(call);
        if (ret < 0)
                return ret;

        d_events = event_trace_events_dir();
        if (!d_events)
                return -ENOENT;

        ret = event_create_dir(call, d_events, id, enable, filter, format);
        if (!ret)
                list_add(&call->list, &ftrace_events);
        call->mod = mod;

        return ret;
}

/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
        int ret;

        mutex_lock(&event_mutex);
        ret = __trace_add_event_call(call, NULL, &ftrace_event_id_fops,
                                     &ftrace_enable_fops,
                                     &ftrace_event_filter_fops,
                                     &ftrace_event_format_fops);
        mutex_unlock(&event_mutex);
        return ret;
}
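
/*
 * trace_add_event_call() is the entry point for events created at
 * runtime rather than at build time; kprobe-based trace events, for
 * example, register themselves through it.
 */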

static void remove_subsystem_dir(const char *name)
{
        struct event_subsystem *system;

        if (strcmp(name, TRACE_SYSTEM) == 0)
                return;

        list_for_each_entry(system, &event_subsystems, list) {
                if (strcmp(system->name, name) == 0) {
                        if (!--system->nr_events) {
                                debugfs_remove_recursive(system->entry);
                                list_del(&system->list);
                                __put_system(system);
                        }
                        break;
                }
        }
}

/*
 * Must be called with both event_mutex and trace_event_mutex held.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
        event_remove(call);
        trace_destroy_fields(call);
        destroy_preds(call);
        debugfs_remove_recursive(call->dir);
        remove_subsystem_dir(call->class->system);
}

/* Remove an event_call */
void trace_remove_event_call(struct ftrace_event_call *call)
{
        mutex_lock(&event_mutex);
        down_write(&trace_event_mutex);
        __trace_remove_event_call(call);
        up_write(&trace_event_mutex);
        mutex_unlock(&event_mutex);
}

#define for_each_event(event, start, end)                       \
        for (event = start;                                     \
             (unsigned long)event < (unsigned long)end;         \
             event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
        struct list_head list;
        struct module *mod;
        struct file_operations id;
        struct file_operations enable;
        struct file_operations format;
        struct file_operations filter;
};

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
        struct ftrace_module_file_ops *file_ops;

        /*
         * This is a bit of a PITA. To allow for correct reference
         * counting, modules must "own" their file_operations.
         * To do this, we allocate the file operations that will be
         * used in the event directory.
         */

        file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
        if (!file_ops)
                return NULL;

        file_ops->mod = mod;

        file_ops->id = ftrace_event_id_fops;
        file_ops->id.owner = mod;

        file_ops->enable = ftrace_enable_fops;
        file_ops->enable.owner = mod;

        file_ops->filter = ftrace_event_filter_fops;
        file_ops->filter.owner = mod;

        file_ops->format = ftrace_event_format_fops;
        file_ops->format.owner = mod;

        list_add(&file_ops->list, &ftrace_module_file_list);

        return file_ops;
}

static void trace_module_add_events(struct module *mod)
{
        struct ftrace_module_file_ops *file_ops = NULL;
        struct ftrace_event_call **call, **start, **end;

        start = mod->trace_events;
        end = mod->trace_events + mod->num_trace_events;

        if (start == end)
                return;

        file_ops = trace_create_file_ops(mod);
        if (!file_ops)
                return;

        for_each_event(call, start, end) {
                __trace_add_event_call(*call, mod,
                                       &file_ops->id, &file_ops->enable,
                                       &file_ops->filter, &file_ops->format);
        }
}

static void trace_module_remove_events(struct module *mod)
{
        struct ftrace_module_file_ops *file_ops;
        struct ftrace_event_call *call, *p;
        bool found = false;

        down_write(&trace_event_mutex);
        list_for_each_entry_safe(call, p, &ftrace_events, list) {
                if (call->mod == mod) {
                        found = true;
                        __trace_remove_event_call(call);
                }
        }

        /* Now free the file_operations */
        list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
                if (file_ops->mod == mod)
                        break;
        }
        if (&file_ops->list != &ftrace_module_file_list) {
                list_del(&file_ops->list);
                kfree(file_ops);
        }

        /*
         * It is safest to reset the ring buffer if the module being unloaded
         * registered any events.
         */
        if (found)
                tracing_reset_current_online_cpus();
        up_write(&trace_event_mutex);
}

static int trace_module_notify(struct notifier_block *self,
                               unsigned long val, void *data)
{
        struct module *mod = data;

        mutex_lock(&event_mutex);
        switch (val) {
        case MODULE_STATE_COMING:
                trace_module_add_events(mod);
                break;
        case MODULE_STATE_GOING:
                trace_module_remove_events(mod);
                break;
        }
        mutex_unlock(&event_mutex);

        return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
                               unsigned long val, void *data)
{
        return 0;
}
#endif /* CONFIG_MODULES */

static struct notifier_block trace_module_nb = {
        .notifier_call = trace_module_notify,
        .priority = 0,
};

extern struct ftrace_event_call *__start_ftrace_events[];
extern struct ftrace_event_call *__stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
{
        strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
        ring_buffer_expanded = 1;
        tracing_selftest_disabled = 1;

        return 1;
}
__setup("trace_event=", setup_trace_event);
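
/*
 * For example, booting with:
 *
 *      trace_event=sched:sched_switch,irq:*
 *
 * stores the list in bootup_event_buf; event_trace_enable() below splits
 * it on ',' and enables each entry via ftrace_set_clr_event().
 */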

static __init int event_trace_enable(void)
{
        struct ftrace_event_call **iter, *call;
        char *buf = bootup_event_buf;
        char *token;
        int ret;

        for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {

                call = *iter;
                ret = event_init(call);
                if (!ret)
                        list_add(&call->list, &ftrace_events);
        }

        while (true) {
                token = strsep(&buf, ",");

                if (!token)
                        break;
                if (!*token)
                        continue;

                ret = ftrace_set_clr_event(token, 1);
                if (ret)
                        pr_warn("Failed to enable trace event: %s\n", token);
        }

        trace_printk_start_comm();

        return 0;
}

static __init int event_trace_init(void)
{
        struct ftrace_event_call *call;
        struct dentry *d_tracer;
        struct dentry *entry;
        struct dentry *d_events;
        int ret;

        d_tracer = tracing_init_dentry();
        if (!d_tracer)
                return 0;

        entry = debugfs_create_file("available_events", 0444, d_tracer,
                                    NULL, &ftrace_avail_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'available_events' entry\n");

        entry = debugfs_create_file("set_event", 0644, d_tracer,
                                    NULL, &ftrace_set_event_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'set_event' entry\n");

        d_events = event_trace_events_dir();
        if (!d_events)
                return 0;

        /* ring buffer internal formats */
        trace_create_file("header_page", 0444, d_events,
                          ring_buffer_print_page_header,
                          &ftrace_show_header_fops);

        trace_create_file("header_event", 0444, d_events,
                          ring_buffer_print_entry_header,
                          &ftrace_show_header_fops);

        trace_create_file("enable", 0644, d_events,
                          NULL, &ftrace_system_enable_fops);

        if (trace_define_common_fields())
                pr_warning("tracing: Failed to allocate common fields\n");

        /*
         * Early initialization already enabled ftrace events.
         * Now it's only necessary to create the event directories.
         */
        list_for_each_entry(call, &ftrace_events, list) {

                ret = event_create_dir(call, d_events,
                                       &ftrace_event_id_fops,
                                       &ftrace_enable_fops,
                                       &ftrace_event_filter_fops,
                                       &ftrace_event_format_fops);
                if (ret < 0)
                        event_remove(call);
        }

        ret = register_module_notifier(&trace_module_nb);
        if (ret)
                pr_warning("Failed to register trace events module notifier\n");

        return 0;
}
core_initcall(event_trace_enable);
fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
        spin_lock(&test_spinlock);
        spin_lock_irq(&test_spinlock_irq);
        udelay(1);
        spin_unlock_irq(&test_spinlock_irq);
        spin_unlock(&test_spinlock);

        mutex_lock(&test_mutex);
        msleep(1);
        mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
        void *test_malloc;

        test_malloc = kmalloc(1234, GFP_KERNEL);
        if (!test_malloc)
                pr_info("failed to kmalloc\n");

        schedule_on_each_cpu(test_work);

        kfree(test_malloc);

        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop())
                schedule();

        return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
        struct task_struct *test_thread;

        test_thread = kthread_run(event_test_thread, NULL, "test-events");
        msleep(1);
        kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
        struct ftrace_event_call *call;
        struct event_subsystem *system;
        int ret;

        pr_info("Running tests on trace events:\n");

        list_for_each_entry(call, &ftrace_events, list) {

                /* Only test those that have a probe */
                if (!call->class || !call->class->probe)
                        continue;

                /*
                 * Testing syscall events here is pretty useless, but
                 * we still do it if configured. It is time consuming,
                 * though; what we really need is a user thread to
                 * perform the syscalls as we test.
                 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
                if (call->class->system &&
                    strcmp(call->class->system, "syscalls") == 0)
                        continue;
#endif

                pr_info("Testing event %s: ", call->name);

                /*
                 * If an event is already enabled, someone is using
                 * it and the self test should not be on.
                 */
                if (call->flags & TRACE_EVENT_FL_ENABLED) {
                        pr_warning("Enabled event during self test!\n");
                        WARN_ON_ONCE(1);
                        continue;
                }

                ftrace_event_enable_disable(call, 1);
                event_test_stuff();
                ftrace_event_enable_disable(call, 0);

                pr_cont("OK\n");
        }

        /* Now test at the sub system level */

        pr_info("Running tests on trace event systems:\n");

        list_for_each_entry(system, &event_subsystems, list) {

                /* the ftrace system is special, skip it */
                if (strcmp(system->name, "ftrace") == 0)
                        continue;

                pr_info("Testing event system %s: ", system->name);

                ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
                if (WARN_ON_ONCE(ret)) {
                        pr_warning("error enabling system %s\n",
                                   system->name);
                        continue;
                }

                event_test_stuff();

                ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
                if (WARN_ON_ONCE(ret)) {
                        pr_warning("error disabling system %s\n",
                                   system->name);
                        continue;
                }

                pr_cont("OK\n");
        }

        /* Test with all events enabled */

        pr_info("Running tests on all trace events:\n");
        pr_info("Testing all events: ");

        ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
        if (WARN_ON_ONCE(ret)) {
                pr_warning("error enabling all events\n");
                return;
        }

        event_test_stuff();

        /* reset sysname */
        ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
        if (WARN_ON_ONCE(ret)) {
                pr_warning("error disabling all events\n");
                return;
        }

        pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct ftrace_entry *entry;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        pc = preempt_count();
        preempt_disable_notrace();
        cpu = raw_smp_processor_id();
        disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

        if (disabled != 1)
                goto out;

        local_save_flags(flags);

        event = trace_current_buffer_lock_reserve(&buffer,
                                                  TRACE_FN, sizeof(*entry),
                                                  flags, pc);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
        entry->ip = ip;
        entry->parent_ip = parent_ip;

        trace_buffer_unlock_commit(buffer, event, flags, pc);

 out:
        atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
        preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __initdata =
{
        .func = function_test_events_call,
        .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static __init void event_trace_self_test_with_function(void)
{
        int ret;

        ret = register_ftrace_function(&trace_ops);
        if (WARN_ON(ret < 0)) {
                pr_info("Failed to enable function tracer for event tests\n");
                return;
        }
        pr_info("Running tests again, along with the function tracer\n");
        event_trace_self_tests();
        unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
        if (!tracing_selftest_disabled) {
                event_trace_self_tests();
                event_trace_self_test_with_function();
        }

        return 0;
}

late_initcall(event_trace_self_tests_init);

#endif