tracing: Return error if register_ftrace_function_probe() fails for event_enable_func()
1 /*
2 * event tracer
3 *
4 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
5 *
6 * - Added format output of fields of the trace point.
7 * This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
8 *
9 */
10
11 #include <linux/workqueue.h>
12 #include <linux/spinlock.h>
13 #include <linux/kthread.h>
14 #include <linux/debugfs.h>
15 #include <linux/uaccess.h>
16 #include <linux/module.h>
17 #include <linux/ctype.h>
18 #include <linux/slab.h>
19 #include <linux/delay.h>
20
21 #include <asm/setup.h>
22
23 #include "trace_output.h"
24
25 #undef TRACE_SYSTEM
26 #define TRACE_SYSTEM "TRACE_SYSTEM"
27
28 DEFINE_MUTEX(event_mutex);
29
30 DEFINE_MUTEX(event_storage_mutex);
31 EXPORT_SYMBOL_GPL(event_storage_mutex);
32
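/*
 * Scratch buffer shared by the event-defining macros (for building an
 * array field's type string, for instance) and serialized by
 * event_storage_mutex. Informal note: the users live outside this file.
 */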
33 char event_storage[EVENT_STORAGE_SIZE];
34 EXPORT_SYMBOL_GPL(event_storage);
35
36 LIST_HEAD(ftrace_events);
37 static LIST_HEAD(ftrace_common_fields);
38
39 #define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)
40
41 static struct kmem_cache *field_cachep;
42 static struct kmem_cache *file_cachep;
43
44 /* Double loops: a break exits only the inner loop; use goto to leave both */
45 #define do_for_each_event_file(tr, file) \
46 list_for_each_entry(tr, &ftrace_trace_arrays, list) { \
47 list_for_each_entry(file, &tr->events, list)
48
49 #define do_for_each_event_file_safe(tr, file) \
50 list_for_each_entry(tr, &ftrace_trace_arrays, list) { \
51 struct ftrace_event_file *___n; \
52 list_for_each_entry_safe(file, ___n, &tr->events, list)
53
54 #define while_for_each_event_file() \
55 }
56
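/*
 * Typical use (sketch only; do_something() is illustrative): visit every
 * event file of every trace array. A break leaves just the inner loop,
 * moving on to the next trace_array; use goto to leave both loops.
 *
 *	do_for_each_event_file(tr, file) {
 *		if (file->event_call != call)
 *			continue;
 *		do_something(file);
 *		break;
 *	} while_for_each_event_file();
 */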
57 static struct list_head *
58 trace_get_fields(struct ftrace_event_call *event_call)
59 {
60 if (!event_call->class->get_fields)
61 return &event_call->class->fields;
62 return event_call->class->get_fields(event_call);
63 }
64
65 static struct ftrace_event_field *
66 __find_event_field(struct list_head *head, char *name)
67 {
68 struct ftrace_event_field *field;
69
70 list_for_each_entry(field, head, link) {
71 if (!strcmp(field->name, name))
72 return field;
73 }
74
75 return NULL;
76 }
77
78 struct ftrace_event_field *
79 trace_find_event_field(struct ftrace_event_call *call, char *name)
80 {
81 struct ftrace_event_field *field;
82 struct list_head *head;
83
84 field = __find_event_field(&ftrace_common_fields, name);
85 if (field)
86 return field;
87
88 head = trace_get_fields(call);
89 return __find_event_field(head, name);
90 }
91
92 static int __trace_define_field(struct list_head *head, const char *type,
93 const char *name, int offset, int size,
94 int is_signed, int filter_type)
95 {
96 struct ftrace_event_field *field;
97
98 field = kmem_cache_alloc(field_cachep, GFP_TRACE);
99 if (!field)
100 return -ENOMEM;
101
102 field->name = name;
103 field->type = type;
104
105 if (filter_type == FILTER_OTHER)
106 field->filter_type = filter_assign_type(type);
107 else
108 field->filter_type = filter_type;
109
110 field->offset = offset;
111 field->size = size;
112 field->is_signed = is_signed;
113
114 list_add(&field->link, head);
115
116 return 0;
117 }
123
124 int trace_define_field(struct ftrace_event_call *call, const char *type,
125 const char *name, int offset, int size, int is_signed,
126 int filter_type)
127 {
128 struct list_head *head;
129
130 if (WARN_ON(!call->class))
131 return 0;
132
133 head = trace_get_fields(call);
134 return __trace_define_field(head, type, name, offset, size,
135 is_signed, filter_type);
136 }
137 EXPORT_SYMBOL_GPL(trace_define_field);
138
139 #define __common_field(type, item) \
140 ret = __trace_define_field(&ftrace_common_fields, #type, \
141 "common_" #item, \
142 offsetof(typeof(ent), item), \
143 sizeof(ent.item), \
144 is_signed_type(type), FILTER_OTHER); \
145 if (ret) \
146 return ret;
147
148 static int trace_define_common_fields(void)
149 {
150 int ret;
151 struct trace_entry ent;
152
153 __common_field(unsigned short, type);
154 __common_field(unsigned char, flags);
155 __common_field(unsigned char, preempt_count);
156 __common_field(int, pid);
157
158 return ret;
159 }
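
/*
 * The block above produces the "common" section seen at the top of every
 * event's format file. The offsets and sizes below are only a sketch
 * (they follow struct trace_entry and depend on the architecture):
 *
 *	field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *	field:unsigned char common_flags;	offset:2;	size:1;	signed:0;
 *	field:unsigned char common_preempt_count;	offset:3;	size:1;	signed:0;
 *	field:int common_pid;	offset:4;	size:4;	signed:1;
 */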
160
161 static void trace_destroy_fields(struct ftrace_event_call *call)
162 {
163 struct ftrace_event_field *field, *next;
164 struct list_head *head;
165
166 head = trace_get_fields(call);
167 list_for_each_entry_safe(field, next, head, link) {
168 list_del(&field->link);
169 kmem_cache_free(field_cachep, field);
170 }
171 }
172
173 int trace_event_raw_init(struct ftrace_event_call *call)
174 {
175 int id;
176
177 id = register_ftrace_event(&call->event);
178 if (!id)
179 return -ENODEV;
180
181 return 0;
182 }
183 EXPORT_SYMBOL_GPL(trace_event_raw_init);
184
185 int ftrace_event_reg(struct ftrace_event_call *call,
186 enum trace_reg type, void *data)
187 {
188 struct ftrace_event_file *file = data;
189
190 switch (type) {
191 case TRACE_REG_REGISTER:
192 return tracepoint_probe_register(call->name,
193 call->class->probe,
194 file);
195 case TRACE_REG_UNREGISTER:
196 tracepoint_probe_unregister(call->name,
197 call->class->probe,
198 file);
199 return 0;
200
201 #ifdef CONFIG_PERF_EVENTS
202 case TRACE_REG_PERF_REGISTER:
203 return tracepoint_probe_register(call->name,
204 call->class->perf_probe,
205 call);
206 case TRACE_REG_PERF_UNREGISTER:
207 tracepoint_probe_unregister(call->name,
208 call->class->perf_probe,
209 call);
210 return 0;
211 case TRACE_REG_PERF_OPEN:
212 case TRACE_REG_PERF_CLOSE:
213 case TRACE_REG_PERF_ADD:
214 case TRACE_REG_PERF_DEL:
215 return 0;
216 #endif
217 }
218 return 0;
219 }
220 EXPORT_SYMBOL_GPL(ftrace_event_reg);
221
222 void trace_event_enable_cmd_record(bool enable)
223 {
224 struct ftrace_event_file *file;
225 struct trace_array *tr;
226
227 mutex_lock(&event_mutex);
228 do_for_each_event_file(tr, file) {
229
230 if (!(file->flags & FTRACE_EVENT_FL_ENABLED))
231 continue;
232
233 if (enable) {
234 tracing_start_cmdline_record();
235 set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
236 } else {
237 tracing_stop_cmdline_record();
238 clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
239 }
240 } while_for_each_event_file();
241 mutex_unlock(&event_mutex);
242 }
243
244 static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
245 int enable, int soft_disable)
246 {
247 struct ftrace_event_call *call = file->event_call;
248 int ret = 0;
249 int disable;
250
251 switch (enable) {
252 case 0:
253 /*
254 * When soft_disable is set and enable is cleared, we want
255 * to clear the SOFT_DISABLED flag but leave the event in the
256 * state that it was. That is, if the event was enabled and
257 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
258 * is set we do not want the event to be enabled before we
259 * clear the bit.
260 *
261 * When soft_disable is not set but the SOFT_MODE flag is,
262 * we do nothing. Do not disable the tracepoint, otherwise
263 * "soft enable"s (clearing the SOFT_DISABLED bit) wont work.
264 */
265 if (soft_disable) {
266 disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
267 clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
268 } else
269 disable = !(file->flags & FTRACE_EVENT_FL_SOFT_MODE);
270
271 if (disable && (file->flags & FTRACE_EVENT_FL_ENABLED)) {
272 clear_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
273 if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) {
274 tracing_stop_cmdline_record();
275 clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
276 }
277 call->class->reg(call, TRACE_REG_UNREGISTER, file);
278 }
279 /* If in SOFT_MODE, just set the SOFT_DISABLE_BIT */
280 if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
281 set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
282 break;
283 case 1:
284 /*
285 * When soft_disable is set and enable is set, we want to
286 * register the tracepoint for the event, but leave the event
287 * as is. That means, if the event was already enabled, we do
288 * nothing (but set SOFT_MODE). If the event is disabled, we
289 * set SOFT_DISABLED before enabling the event tracepoint, so
290 * it still seems to be disabled.
291 */
292 if (!soft_disable)
293 clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
294 else
295 set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
296
297 if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
298
299 /* Keep the event disabled, when going to SOFT_MODE. */
300 if (soft_disable)
301 set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
302
303 if (trace_flags & TRACE_ITER_RECORD_CMD) {
304 tracing_start_cmdline_record();
305 set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
306 }
307 ret = call->class->reg(call, TRACE_REG_REGISTER, file);
308 if (ret) {
309 tracing_stop_cmdline_record();
310 pr_info("event trace: Could not enable event "
311 "%s\n", call->name);
312 break;
313 }
314 set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
315
316 /* WAS_ENABLED gets set but never cleared. */
317 call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
318 }
319 break;
320 }
321
322 return ret;
323 }
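
/*
 * Informal summary of the flags juggled above:
 *   ENABLED        the tracepoint probe is registered
 *   SOFT_MODE      something (e.g. a function probe) has taken soft
 *                  control of the event
 *   SOFT_DISABLED  the probe still fires, but returns before recording
 */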
324
325 static int ftrace_event_enable_disable(struct ftrace_event_file *file,
326 int enable)
327 {
328 return __ftrace_event_enable_disable(file, enable, 0);
329 }
330
331 static void ftrace_clear_events(struct trace_array *tr)
332 {
333 struct ftrace_event_file *file;
334
335 mutex_lock(&event_mutex);
336 list_for_each_entry(file, &tr->events, list) {
337 ftrace_event_enable_disable(file, 0);
338 }
339 mutex_unlock(&event_mutex);
340 }
341
342 static void __put_system(struct event_subsystem *system)
343 {
344 struct event_filter *filter = system->filter;
345
346 WARN_ON_ONCE(system->ref_count == 0);
347 if (--system->ref_count)
348 return;
349
350 list_del(&system->list);
351
352 if (filter) {
353 kfree(filter->filter_string);
354 kfree(filter);
355 }
356 kfree(system);
357 }
358
359 static void __get_system(struct event_subsystem *system)
360 {
361 WARN_ON_ONCE(system->ref_count == 0);
362 system->ref_count++;
363 }
364
365 static void __get_system_dir(struct ftrace_subsystem_dir *dir)
366 {
367 WARN_ON_ONCE(dir->ref_count == 0);
368 dir->ref_count++;
369 __get_system(dir->subsystem);
370 }
371
372 static void __put_system_dir(struct ftrace_subsystem_dir *dir)
373 {
374 WARN_ON_ONCE(dir->ref_count == 0);
375 /* If the subsystem is about to be freed, the dir must be too */
376 WARN_ON_ONCE(dir->subsystem->ref_count == 1 && dir->ref_count != 1);
377
378 __put_system(dir->subsystem);
379 if (!--dir->ref_count)
380 kfree(dir);
381 }
382
383 static void put_system(struct ftrace_subsystem_dir *dir)
384 {
385 mutex_lock(&event_mutex);
386 __put_system_dir(dir);
387 mutex_unlock(&event_mutex);
388 }
389
390 /*
391 * __ftrace_set_clr_event(tr, NULL, NULL, NULL, set) will set/unset all events.
392 */
393 static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
394 const char *sub, const char *event, int set)
395 {
396 struct ftrace_event_file *file;
397 struct ftrace_event_call *call;
398 int ret = -EINVAL;
399
400 mutex_lock(&event_mutex);
401 list_for_each_entry(file, &tr->events, list) {
402
403 call = file->event_call;
404
405 if (!call->name || !call->class || !call->class->reg)
406 continue;
407
408 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
409 continue;
410
411 if (match &&
412 strcmp(match, call->name) != 0 &&
413 strcmp(match, call->class->system) != 0)
414 continue;
415
416 if (sub && strcmp(sub, call->class->system) != 0)
417 continue;
418
419 if (event && strcmp(event, call->name) != 0)
420 continue;
421
422 ftrace_event_enable_disable(file, set);
423
424 ret = 0;
425 }
426 mutex_unlock(&event_mutex);
427
428 return ret;
429 }
430
431 static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
432 {
433 char *event = NULL, *sub = NULL, *match;
434
435 /*
436 * The buf format can be <subsystem>:<event-name>
437 * *:<event-name> means any event by that name.
438 * :<event-name> is the same.
439 *
440 * <subsystem>:* means all events in that subsystem
441 * <subsystem>: means the same.
442 *
443 * <name> (no ':') means all events in a subsystem with
444 * the name <name> or any event that matches <name>
445 */
446
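/*
 * Examples (event names are illustrative):
 *   "sched:sched_switch"   one event in one subsystem
 *   "sched:"               every event in the "sched" subsystem
 *   "sched_switch"         any subsystem or event with that name
 */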
447 match = strsep(&buf, ":");
448 if (buf) {
449 sub = match;
450 event = buf;
451 match = NULL;
452
453 if (!strlen(sub) || strcmp(sub, "*") == 0)
454 sub = NULL;
455 if (!strlen(event) || strcmp(event, "*") == 0)
456 event = NULL;
457 }
458
459 return __ftrace_set_clr_event(tr, match, sub, event, set);
460 }
461
462 /**
463 * trace_set_clr_event - enable or disable an event
464 * @system: system name to match (NULL for any system)
465 * @event: event name to match (NULL for all events, within system)
466 * @set: 1 to enable, 0 to disable
467 *
468 * This is a way for other parts of the kernel to enable or disable
469 * event recording.
470 *
471 * Returns 0 on success, -EINVAL if the parameters do not match any
472 * registered events.
473 */
474 int trace_set_clr_event(const char *system, const char *event, int set)
475 {
476 struct trace_array *tr = top_trace_array();
477
478 return __ftrace_set_clr_event(tr, NULL, system, event, set);
479 }
480 EXPORT_SYMBOL_GPL(trace_set_clr_event);
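
/*
 * Minimal in-kernel usage sketch (names are illustrative):
 *
 *	if (trace_set_clr_event("sched", "sched_switch", 1))
 *		pr_warn("sched:sched_switch did not match any event\n");
 */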
481
482 /* 128 should be much more than enough */
483 #define EVENT_BUF_SIZE 127
484
485 static ssize_t
486 ftrace_event_write(struct file *file, const char __user *ubuf,
487 size_t cnt, loff_t *ppos)
488 {
489 struct trace_parser parser;
490 struct seq_file *m = file->private_data;
491 struct trace_array *tr = m->private;
492 ssize_t read, ret;
493
494 if (!cnt)
495 return 0;
496
497 ret = tracing_update_buffers();
498 if (ret < 0)
499 return ret;
500
501 if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
502 return -ENOMEM;
503
504 read = trace_get_user(&parser, ubuf, cnt, ppos);
505
506 if (read >= 0 && trace_parser_loaded((&parser))) {
507 int set = 1;
508
509 if (*parser.buffer == '!')
510 set = 0;
511
512 parser.buffer[parser.idx] = 0;
513
514 ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
515 if (ret)
516 goto out_put;
517 }
518
519 ret = read;
520
521 out_put:
522 trace_parser_put(&parser);
523
524 return ret;
525 }
526
527 static void *
528 t_next(struct seq_file *m, void *v, loff_t *pos)
529 {
530 struct ftrace_event_file *file = v;
531 struct ftrace_event_call *call;
532 struct trace_array *tr = m->private;
533
534 (*pos)++;
535
536 list_for_each_entry_continue(file, &tr->events, list) {
537 call = file->event_call;
538 /*
539 * The ftrace subsystem is for showing formats only.
540 * Its events cannot be enabled or disabled via the event files.
541 */
542 if (call->class && call->class->reg)
543 return file;
544 }
545
546 return NULL;
547 }
548
549 static void *t_start(struct seq_file *m, loff_t *pos)
550 {
551 struct ftrace_event_file *file;
552 struct trace_array *tr = m->private;
553 loff_t l;
554
555 mutex_lock(&event_mutex);
556
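/*
 * Note: this list_entry() on the list head itself is not a valid entry;
 * it only primes the cursor so that list_for_each_entry_continue() in
 * t_next() starts at the first real element. s_start() below uses the
 * same trick.
 */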
557 file = list_entry(&tr->events, struct ftrace_event_file, list);
558 for (l = 0; l <= *pos; ) {
559 file = t_next(m, file, &l);
560 if (!file)
561 break;
562 }
563 return file;
564 }
565
566 static void *
567 s_next(struct seq_file *m, void *v, loff_t *pos)
568 {
569 struct ftrace_event_file *file = v;
570 struct trace_array *tr = m->private;
571
572 (*pos)++;
573
574 list_for_each_entry_continue(file, &tr->events, list) {
575 if (file->flags & FTRACE_EVENT_FL_ENABLED)
576 return file;
577 }
578
579 return NULL;
580 }
581
582 static void *s_start(struct seq_file *m, loff_t *pos)
583 {
584 struct ftrace_event_file *file;
585 struct trace_array *tr = m->private;
586 loff_t l;
587
588 mutex_lock(&event_mutex);
589
590 file = list_entry(&tr->events, struct ftrace_event_file, list);
591 for (l = 0; l <= *pos; ) {
592 file = s_next(m, file, &l);
593 if (!file)
594 break;
595 }
596 return file;
597 }
598
599 static int t_show(struct seq_file *m, void *v)
600 {
601 struct ftrace_event_file *file = v;
602 struct ftrace_event_call *call = file->event_call;
603
604 if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
605 seq_printf(m, "%s:", call->class->system);
606 seq_printf(m, "%s\n", call->name);
607
608 return 0;
609 }
610
611 static void t_stop(struct seq_file *m, void *p)
612 {
613 mutex_unlock(&event_mutex);
614 }
615
616 static ssize_t
617 event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
618 loff_t *ppos)
619 {
620 struct ftrace_event_file *file = filp->private_data;
621 char *buf;
622
623 if (file->flags & FTRACE_EVENT_FL_ENABLED) {
624 if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED)
625 buf = "0*\n";
626 else
627 buf = "1\n";
628 } else
629 buf = "0\n";
630
631 return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
632 }
633
634 static ssize_t
635 event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
636 loff_t *ppos)
637 {
638 struct ftrace_event_file *file = filp->private_data;
639 unsigned long val;
640 int ret;
641
642 if (!file)
643 return -EINVAL;
644
645 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
646 if (ret)
647 return ret;
648
649 ret = tracing_update_buffers();
650 if (ret < 0)
651 return ret;
652
653 switch (val) {
654 case 0:
655 case 1:
656 mutex_lock(&event_mutex);
657 ret = ftrace_event_enable_disable(file, val);
658 mutex_unlock(&event_mutex);
659 break;
660
661 default:
662 return -EINVAL;
663 }
664
665 *ppos += cnt;
666
667 return ret ? ret : cnt;
668 }
669
670 static ssize_t
671 system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
672 loff_t *ppos)
673 {
674 const char set_to_char[4] = { '?', '0', '1', 'X' };
675 struct ftrace_subsystem_dir *dir = filp->private_data;
676 struct event_subsystem *system = dir->subsystem;
677 struct ftrace_event_call *call;
678 struct ftrace_event_file *file;
679 struct trace_array *tr = dir->tr;
680 char buf[2];
681 int set = 0;
682 int ret;
683
684 mutex_lock(&event_mutex);
685 list_for_each_entry(file, &tr->events, list) {
686 call = file->event_call;
687 if (!call->name || !call->class || !call->class->reg)
688 continue;
689
690 if (system && strcmp(call->class->system, system->name) != 0)
691 continue;
692
693 /*
694 * We need to find out if all the events are set
695 * or if all events are cleared, or if we have
696 * a mixture.
697 */
698 set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED));
699
700 /*
701 * If we have a mixture, no need to look further.
702 */
703 if (set == 3)
704 break;
705 }
706 mutex_unlock(&event_mutex);
707
708 buf[0] = set_to_char[set];
709 buf[1] = '\n';
710
711 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
712
713 return ret;
714 }
715
716 static ssize_t
717 system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
718 loff_t *ppos)
719 {
720 struct ftrace_subsystem_dir *dir = filp->private_data;
721 struct event_subsystem *system = dir->subsystem;
722 const char *name = NULL;
723 unsigned long val;
724 ssize_t ret;
725
726 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
727 if (ret)
728 return ret;
729
730 ret = tracing_update_buffers();
731 if (ret < 0)
732 return ret;
733
734 if (val != 0 && val != 1)
735 return -EINVAL;
736
737 /*
738 * Opening of "enable" adds a ref count to system,
739 * so the name is safe to use.
740 */
741 if (system)
742 name = system->name;
743
744 ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
745 if (ret)
746 goto out;
747
748 ret = cnt;
749
750 out:
751 *ppos += cnt;
752
753 return ret;
754 }
755
756 enum {
757 FORMAT_HEADER = 1,
758 FORMAT_FIELD_SEPERATOR = 2,
759 FORMAT_PRINTFMT = 3,
760 };
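
/*
 * Order produced by f_next(): FORMAT_HEADER, the common fields, a
 * FORMAT_FIELD_SEPERATOR, the event's own fields, then FORMAT_PRINTFMT.
 * Fields are walked via ->prev because list_add() prepends them; walking
 * backwards restores definition order.
 */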
761
762 static void *f_next(struct seq_file *m, void *v, loff_t *pos)
763 {
764 struct ftrace_event_call *call = m->private;
765 struct ftrace_event_field *field;
766 struct list_head *common_head = &ftrace_common_fields;
767 struct list_head *head = trace_get_fields(call);
768
769 (*pos)++;
770
771 switch ((unsigned long)v) {
772 case FORMAT_HEADER:
773 if (unlikely(list_empty(common_head)))
774 return NULL;
775
776 field = list_entry(common_head->prev,
777 struct ftrace_event_field, link);
778 return field;
779
780 case FORMAT_FIELD_SEPERATOR:
781 if (unlikely(list_empty(head)))
782 return NULL;
783
784 field = list_entry(head->prev, struct ftrace_event_field, link);
785 return field;
786
787 case FORMAT_PRINTFMT:
788 /* all done */
789 return NULL;
790 }
791
792 field = v;
793 if (field->link.prev == common_head)
794 return (void *)FORMAT_FIELD_SEPERATOR;
795 else if (field->link.prev == head)
796 return (void *)FORMAT_PRINTFMT;
797
798 field = list_entry(field->link.prev, struct ftrace_event_field, link);
799
800 return field;
801 }
802
803 static void *f_start(struct seq_file *m, loff_t *pos)
804 {
805 loff_t l = 0;
806 void *p;
807
808 /* Start by showing the header */
809 if (!*pos)
810 return (void *)FORMAT_HEADER;
811
812 p = (void *)FORMAT_HEADER;
813 do {
814 p = f_next(m, p, &l);
815 } while (p && l < *pos);
816
817 return p;
818 }
819
820 static int f_show(struct seq_file *m, void *v)
821 {
822 struct ftrace_event_call *call = m->private;
823 struct ftrace_event_field *field;
824 const char *array_descriptor;
825
826 switch ((unsigned long)v) {
827 case FORMAT_HEADER:
828 seq_printf(m, "name: %s\n", call->name);
829 seq_printf(m, "ID: %d\n", call->event.type);
830 seq_printf(m, "format:\n");
831 return 0;
832
833 case FORMAT_FIELD_SEPERATOR:
834 seq_putc(m, '\n');
835 return 0;
836
837 case FORMAT_PRINTFMT:
838 seq_printf(m, "\nprint fmt: %s\n",
839 call->print_fmt);
840 return 0;
841 }
842
843 field = v;
844
845 /*
846 * Smartly shows the array type (except dynamic arrays).
847 * Normal:
848 * field:TYPE VAR
849 * If TYPE := TYPE[LEN], it is shown:
850 * field:TYPE VAR[LEN]
851 */
852 array_descriptor = strchr(field->type, '[');
853
854 if (!strncmp(field->type, "__data_loc", 10))
855 array_descriptor = NULL;
856
857 if (!array_descriptor)
858 seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
859 field->type, field->name, field->offset,
860 field->size, !!field->is_signed);
861 else
862 seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
863 (int)(array_descriptor - field->type),
864 field->type, field->name,
865 array_descriptor, field->offset,
866 field->size, !!field->is_signed);
867
868 return 0;
869 }
870
871 static void f_stop(struct seq_file *m, void *p)
872 {
873 }
874
875 static const struct seq_operations trace_format_seq_ops = {
876 .start = f_start,
877 .next = f_next,
878 .stop = f_stop,
879 .show = f_show,
880 };
881
882 static int trace_format_open(struct inode *inode, struct file *file)
883 {
884 struct ftrace_event_call *call = inode->i_private;
885 struct seq_file *m;
886 int ret;
887
888 ret = seq_open(file, &trace_format_seq_ops);
889 if (ret < 0)
890 return ret;
891
892 m = file->private_data;
893 m->private = call;
894
895 return 0;
896 }
897
898 static ssize_t
899 event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
900 {
901 struct ftrace_event_call *call = filp->private_data;
902 struct trace_seq *s;
903 int r;
904
905 if (*ppos)
906 return 0;
907
908 s = kmalloc(sizeof(*s), GFP_KERNEL);
909 if (!s)
910 return -ENOMEM;
911
912 trace_seq_init(s);
913 trace_seq_printf(s, "%d\n", call->event.type);
914
915 r = simple_read_from_buffer(ubuf, cnt, ppos,
916 s->buffer, s->len);
917 kfree(s);
918 return r;
919 }
920
921 static ssize_t
922 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
923 loff_t *ppos)
924 {
925 struct ftrace_event_call *call = filp->private_data;
926 struct trace_seq *s;
927 int r;
928
929 if (*ppos)
930 return 0;
931
932 s = kmalloc(sizeof(*s), GFP_KERNEL);
933 if (!s)
934 return -ENOMEM;
935
936 trace_seq_init(s);
937
938 print_event_filter(call, s);
939 r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
940
941 kfree(s);
942
943 return r;
944 }
945
946 static ssize_t
947 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
948 loff_t *ppos)
949 {
950 struct ftrace_event_call *call = filp->private_data;
951 char *buf;
952 int err;
953
954 if (cnt >= PAGE_SIZE)
955 return -EINVAL;
956
957 buf = (char *)__get_free_page(GFP_TEMPORARY);
958 if (!buf)
959 return -ENOMEM;
960
961 if (copy_from_user(buf, ubuf, cnt)) {
962 free_page((unsigned long) buf);
963 return -EFAULT;
964 }
965 buf[cnt] = '\0';
966
967 err = apply_event_filter(call, buf);
968 free_page((unsigned long) buf);
969 if (err < 0)
970 return err;
971
972 *ppos += cnt;
973
974 return cnt;
975 }
976
977 static LIST_HEAD(event_subsystems);
978
979 static int subsystem_open(struct inode *inode, struct file *filp)
980 {
981 struct event_subsystem *system = NULL;
982 struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */
983 struct trace_array *tr;
984 int ret;
985
986 /* Make sure the system still exists */
987 mutex_lock(&event_mutex);
988 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
989 list_for_each_entry(dir, &tr->systems, list) {
990 if (dir == inode->i_private) {
991 /* Don't open systems with no events */
992 if (dir->nr_events) {
993 __get_system_dir(dir);
994 system = dir->subsystem;
995 }
996 goto exit_loop;
997 }
998 }
999 }
1000 exit_loop:
1001 mutex_unlock(&event_mutex);
1002
1003 if (!system)
1004 return -ENODEV;
1005
1006 /* Some versions of gcc think dir can be uninitialized here */
1007 WARN_ON(!dir);
1008
1009 ret = tracing_open_generic(inode, filp);
1010 if (ret < 0)
1011 put_system(dir);
1012
1013 return ret;
1014 }
1015
1016 static int system_tr_open(struct inode *inode, struct file *filp)
1017 {
1018 struct ftrace_subsystem_dir *dir;
1019 struct trace_array *tr = inode->i_private;
1020 int ret;
1021
1022 /* Make a temporary dir that has no system but points to tr */
1023 dir = kzalloc(sizeof(*dir), GFP_KERNEL);
1024 if (!dir)
1025 return -ENOMEM;
1026
1027 dir->tr = tr;
1028
1029 ret = tracing_open_generic(inode, filp);
1030 if (ret < 0) {
1031 kfree(dir);
1032 return ret;
1033 }
1034 filp->private_data = dir;
1035 return 0;
1036 }
1037
1038 static int subsystem_release(struct inode *inode, struct file *file)
1039 {
1040 struct ftrace_subsystem_dir *dir = file->private_data;
1041
1042 /*
1043 * If dir->subsystem is NULL, then this is a temporary
1044 * descriptor that was made for a trace_array to enable
1045 * all subsystems.
1046 */
1047 if (dir->subsystem)
1048 put_system(dir);
1049 else
1050 kfree(dir);
1051
1052 return 0;
1053 }
1054
1055 static ssize_t
1056 subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
1057 loff_t *ppos)
1058 {
1059 struct ftrace_subsystem_dir *dir = filp->private_data;
1060 struct event_subsystem *system = dir->subsystem;
1061 struct trace_seq *s;
1062 int r;
1063
1064 if (*ppos)
1065 return 0;
1066
1067 s = kmalloc(sizeof(*s), GFP_KERNEL);
1068 if (!s)
1069 return -ENOMEM;
1070
1071 trace_seq_init(s);
1072
1073 print_subsystem_event_filter(system, s);
1074 r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
1075
1076 kfree(s);
1077
1078 return r;
1079 }
1080
1081 static ssize_t
1082 subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
1083 loff_t *ppos)
1084 {
1085 struct ftrace_subsystem_dir *dir = filp->private_data;
1086 char *buf;
1087 int err;
1088
1089 if (cnt >= PAGE_SIZE)
1090 return -EINVAL;
1091
1092 buf = (char *)__get_free_page(GFP_TEMPORARY);
1093 if (!buf)
1094 return -ENOMEM;
1095
1096 if (copy_from_user(buf, ubuf, cnt)) {
1097 free_page((unsigned long) buf);
1098 return -EFAULT;
1099 }
1100 buf[cnt] = '\0';
1101
1102 err = apply_subsystem_event_filter(dir, buf);
1103 free_page((unsigned long) buf);
1104 if (err < 0)
1105 return err;
1106
1107 *ppos += cnt;
1108
1109 return cnt;
1110 }
1111
1112 static ssize_t
1113 show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
1114 {
1115 int (*func)(struct trace_seq *s) = filp->private_data;
1116 struct trace_seq *s;
1117 int r;
1118
1119 if (*ppos)
1120 return 0;
1121
1122 s = kmalloc(sizeof(*s), GFP_KERNEL);
1123 if (!s)
1124 return -ENOMEM;
1125
1126 trace_seq_init(s);
1127
1128 func(s);
1129 r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
1130
1131 kfree(s);
1132
1133 return r;
1134 }
1135
1136 static int ftrace_event_avail_open(struct inode *inode, struct file *file);
1137 static int ftrace_event_set_open(struct inode *inode, struct file *file);
1138
1139 static const struct seq_operations show_event_seq_ops = {
1140 .start = t_start,
1141 .next = t_next,
1142 .show = t_show,
1143 .stop = t_stop,
1144 };
1145
1146 static const struct seq_operations show_set_event_seq_ops = {
1147 .start = s_start,
1148 .next = s_next,
1149 .show = t_show,
1150 .stop = t_stop,
1151 };
1152
1153 static const struct file_operations ftrace_avail_fops = {
1154 .open = ftrace_event_avail_open,
1155 .read = seq_read,
1156 .llseek = seq_lseek,
1157 .release = seq_release,
1158 };
1159
1160 static const struct file_operations ftrace_set_event_fops = {
1161 .open = ftrace_event_set_open,
1162 .read = seq_read,
1163 .write = ftrace_event_write,
1164 .llseek = seq_lseek,
1165 .release = seq_release,
1166 };
1167
1168 static const struct file_operations ftrace_enable_fops = {
1169 .open = tracing_open_generic,
1170 .read = event_enable_read,
1171 .write = event_enable_write,
1172 .llseek = default_llseek,
1173 };
1174
1175 static const struct file_operations ftrace_event_format_fops = {
1176 .open = trace_format_open,
1177 .read = seq_read,
1178 .llseek = seq_lseek,
1179 .release = seq_release,
1180 };
1181
1182 static const struct file_operations ftrace_event_id_fops = {
1183 .open = tracing_open_generic,
1184 .read = event_id_read,
1185 .llseek = default_llseek,
1186 };
1187
1188 static const struct file_operations ftrace_event_filter_fops = {
1189 .open = tracing_open_generic,
1190 .read = event_filter_read,
1191 .write = event_filter_write,
1192 .llseek = default_llseek,
1193 };
1194
1195 static const struct file_operations ftrace_subsystem_filter_fops = {
1196 .open = subsystem_open,
1197 .read = subsystem_filter_read,
1198 .write = subsystem_filter_write,
1199 .llseek = default_llseek,
1200 .release = subsystem_release,
1201 };
1202
1203 static const struct file_operations ftrace_system_enable_fops = {
1204 .open = subsystem_open,
1205 .read = system_enable_read,
1206 .write = system_enable_write,
1207 .llseek = default_llseek,
1208 .release = subsystem_release,
1209 };
1210
1211 static const struct file_operations ftrace_tr_enable_fops = {
1212 .open = system_tr_open,
1213 .read = system_enable_read,
1214 .write = system_enable_write,
1215 .llseek = default_llseek,
1216 .release = subsystem_release,
1217 };
1218
1219 static const struct file_operations ftrace_show_header_fops = {
1220 .open = tracing_open_generic,
1221 .read = show_header,
1222 .llseek = default_llseek,
1223 };
1224
1225 static int
1226 ftrace_event_open(struct inode *inode, struct file *file,
1227 const struct seq_operations *seq_ops)
1228 {
1229 struct seq_file *m;
1230 int ret;
1231
1232 ret = seq_open(file, seq_ops);
1233 if (ret < 0)
1234 return ret;
1235 m = file->private_data;
1236 /* copy tr over to seq ops */
1237 m->private = inode->i_private;
1238
1239 return ret;
1240 }
1241
1242 static int
1243 ftrace_event_avail_open(struct inode *inode, struct file *file)
1244 {
1245 const struct seq_operations *seq_ops = &show_event_seq_ops;
1246
1247 return ftrace_event_open(inode, file, seq_ops);
1248 }
1249
1250 static int
1251 ftrace_event_set_open(struct inode *inode, struct file *file)
1252 {
1253 const struct seq_operations *seq_ops = &show_set_event_seq_ops;
1254 struct trace_array *tr = inode->i_private;
1255
1256 if ((file->f_mode & FMODE_WRITE) &&
1257 (file->f_flags & O_TRUNC))
1258 ftrace_clear_events(tr);
1259
1260 return ftrace_event_open(inode, file, seq_ops);
1261 }
1262
1263 static struct event_subsystem *
1264 create_new_subsystem(const char *name)
1265 {
1266 struct event_subsystem *system;
1267
1268 /* need to create new entry */
1269 system = kmalloc(sizeof(*system), GFP_KERNEL);
1270 if (!system)
1271 return NULL;
1272
1273 system->ref_count = 1;
1274 system->name = name;
1275
1276 system->filter = NULL;
1277
1278 system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
1279 if (!system->filter)
1280 goto out_free;
1281
1282 list_add(&system->list, &event_subsystems);
1283
1284 return system;
1285
1286 out_free:
1287 kfree(system);
1288 return NULL;
1289 }
1290
1291 static struct dentry *
1292 event_subsystem_dir(struct trace_array *tr, const char *name,
1293 struct ftrace_event_file *file, struct dentry *parent)
1294 {
1295 struct ftrace_subsystem_dir *dir;
1296 struct event_subsystem *system;
1297 struct dentry *entry;
1298
1299 /* First see if this directory has already been created */
1300 list_for_each_entry(dir, &tr->systems, list) {
1301 system = dir->subsystem;
1302 if (strcmp(system->name, name) == 0) {
1303 dir->nr_events++;
1304 file->system = dir;
1305 return dir->entry;
1306 }
1307 }
1308
1309 /* Now see if the system itself exists. */
1310 list_for_each_entry(system, &event_subsystems, list) {
1311 if (strcmp(system->name, name) == 0)
1312 break;
1313 }
1314 /* Reset system variable when not found */
1315 if (&system->list == &event_subsystems)
1316 system = NULL;
1317
1318 dir = kmalloc(sizeof(*dir), GFP_KERNEL);
1319 if (!dir)
1320 goto out_fail;
1321
1322 if (!system) {
1323 system = create_new_subsystem(name);
1324 if (!system)
1325 goto out_free;
1326 } else
1327 __get_system(system);
1328
1329 dir->entry = debugfs_create_dir(name, parent);
1330 if (!dir->entry) {
1331 pr_warning("Failed to create system directory %s\n", name);
1332 __put_system(system);
1333 goto out_free;
1334 }
1335
1336 dir->tr = tr;
1337 dir->ref_count = 1;
1338 dir->nr_events = 1;
1339 dir->subsystem = system;
1340 file->system = dir;
1341
1342 entry = debugfs_create_file("filter", 0644, dir->entry, dir,
1343 &ftrace_subsystem_filter_fops);
1344 if (!entry) {
1345 kfree(system->filter);
1346 system->filter = NULL;
1347 pr_warning("Could not create debugfs '%s/filter' entry\n", name);
1348 }
1349
1350 trace_create_file("enable", 0644, dir->entry, dir,
1351 &ftrace_system_enable_fops);
1352
1353 list_add(&dir->list, &tr->systems);
1354
1355 return dir->entry;
1356
1357 out_free:
1358 kfree(dir);
1359 out_fail:
1360 /* Only print this message if failed on memory allocation */
1361 if (!dir || !system)
1362 pr_warning("No memory to create event subsystem %s\n",
1363 name);
1364 return NULL;
1365 }
1366
1367 static int
1368 event_create_dir(struct dentry *parent,
1369 struct ftrace_event_file *file,
1370 const struct file_operations *id,
1371 const struct file_operations *enable,
1372 const struct file_operations *filter,
1373 const struct file_operations *format)
1374 {
1375 struct ftrace_event_call *call = file->event_call;
1376 struct trace_array *tr = file->tr;
1377 struct list_head *head;
1378 struct dentry *d_events;
1379 int ret;
1380
1381 /*
1382 * If the trace point header did not define TRACE_SYSTEM
1383 * then the system would be called "TRACE_SYSTEM".
1384 */
1385 if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
1386 d_events = event_subsystem_dir(tr, call->class->system, file, parent);
1387 if (!d_events)
1388 return -ENOMEM;
1389 } else
1390 d_events = parent;
1391
1392 file->dir = debugfs_create_dir(call->name, d_events);
1393 if (!file->dir) {
1394 pr_warning("Could not create debugfs '%s' directory\n",
1395 call->name);
1396 return -1;
1397 }
1398
1399 if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
1400 trace_create_file("enable", 0644, file->dir, file,
1401 enable);
1402
1403 #ifdef CONFIG_PERF_EVENTS
1404 if (call->event.type && call->class->reg)
1405 trace_create_file("id", 0444, file->dir, call,
1406 id);
1407 #endif
1408
1409 /*
1410 * Other events may have the same class. Only update
1411 * the fields if they are not already defined.
1412 */
1413 head = trace_get_fields(call);
1414 if (list_empty(head)) {
1415 ret = call->class->define_fields(call);
1416 if (ret < 0) {
1417 pr_warning("Could not initialize trace point"
1418 " events/%s\n", call->name);
1419 return -1;
1420 }
1421 }
1422 trace_create_file("filter", 0644, file->dir, call,
1423 filter);
1424
1425 trace_create_file("format", 0444, file->dir, call,
1426 format);
1427
1428 return 0;
1429 }
1430
1431 static void remove_subsystem(struct ftrace_subsystem_dir *dir)
1432 {
1433 if (!dir)
1434 return;
1435
1436 if (!--dir->nr_events) {
1437 debugfs_remove_recursive(dir->entry);
1438 list_del(&dir->list);
1439 __put_system_dir(dir);
1440 }
1441 }
1442
1443 static void remove_event_from_tracers(struct ftrace_event_call *call)
1444 {
1445 struct ftrace_event_file *file;
1446 struct trace_array *tr;
1447
1448 do_for_each_event_file_safe(tr, file) {
1449
1450 if (file->event_call != call)
1451 continue;
1452
1453 list_del(&file->list);
1454 debugfs_remove_recursive(file->dir);
1455 remove_subsystem(file->system);
1456 kmem_cache_free(file_cachep, file);
1457
1458 /*
1459 * The do_for_each_event_file_safe() is
1460 * a double loop. After finding the call for this
1461 * trace_array, we use break to jump to the next
1462 * trace_array.
1463 */
1464 break;
1465 } while_for_each_event_file();
1466 }
1467
1468 static void event_remove(struct ftrace_event_call *call)
1469 {
1470 struct trace_array *tr;
1471 struct ftrace_event_file *file;
1472
1473 do_for_each_event_file(tr, file) {
1474 if (file->event_call != call)
1475 continue;
1476 ftrace_event_enable_disable(file, 0);
1477 /*
1478 * The do_for_each_event_file() is
1479 * a double loop. After finding the call for this
1480 * trace_array, we use break to jump to the next
1481 * trace_array.
1482 */
1483 break;
1484 } while_for_each_event_file();
1485
1486 if (call->event.funcs)
1487 __unregister_ftrace_event(&call->event);
1488 remove_event_from_tracers(call);
1489 list_del(&call->list);
1490 }
1491
1492 static int event_init(struct ftrace_event_call *call)
1493 {
1494 int ret = 0;
1495
1496 if (WARN_ON(!call->name))
1497 return -EINVAL;
1498
1499 if (call->class->raw_init) {
1500 ret = call->class->raw_init(call);
1501 if (ret < 0 && ret != -ENOSYS)
1502 pr_warn("Could not initialize trace events/%s\n",
1503 call->name);
1504 }
1505
1506 return ret;
1507 }
1508
1509 static int
1510 __register_event(struct ftrace_event_call *call, struct module *mod)
1511 {
1512 int ret;
1513
1514 ret = event_init(call);
1515 if (ret < 0)
1516 return ret;
1517
1518 list_add(&call->list, &ftrace_events);
1519 call->mod = mod;
1520
1521 return 0;
1522 }
1523
1524 /* Add an event to a trace directory */
1525 static int
1526 __trace_add_new_event(struct ftrace_event_call *call,
1527 struct trace_array *tr,
1528 const struct file_operations *id,
1529 const struct file_operations *enable,
1530 const struct file_operations *filter,
1531 const struct file_operations *format)
1532 {
1533 struct ftrace_event_file *file;
1534
1535 file = kmem_cache_alloc(file_cachep, GFP_TRACE);
1536 if (!file)
1537 return -ENOMEM;
1538
1539 file->event_call = call;
1540 file->tr = tr;
1541 list_add(&file->list, &tr->events);
1542
1543 return event_create_dir(tr->event_dir, file, id, enable, filter, format);
1544 }
1545
1546 /*
1547 * Just create a descriptor for early init. A descriptor is required
1548 * for enabling events at boot. We want to enable events before
1549 * the filesystem is initialized.
1550 */
1551 static __init int
1552 __trace_early_add_new_event(struct ftrace_event_call *call,
1553 struct trace_array *tr)
1554 {
1555 struct ftrace_event_file *file;
1556
1557 file = kmem_cache_alloc(file_cachep, GFP_TRACE);
1558 if (!file)
1559 return -ENOMEM;
1560
1561 file->event_call = call;
1562 file->tr = tr;
1563 list_add(&file->list, &tr->events);
1564
1565 return 0;
1566 }
1567
1568 struct ftrace_module_file_ops;
1569 static void __add_event_to_tracers(struct ftrace_event_call *call,
1570 struct ftrace_module_file_ops *file_ops);
1571
1572 /* Add an additional event_call dynamically */
1573 int trace_add_event_call(struct ftrace_event_call *call)
1574 {
1575 int ret;
1576 mutex_lock(&event_mutex);
1577
1578 ret = __register_event(call, NULL);
1579 if (ret >= 0)
1580 __add_event_to_tracers(call, NULL);
1581
1582 mutex_unlock(&event_mutex);
1583 return ret;
1584 }
1585
1586 /*
1587 * Must be called under locking both of event_mutex and trace_event_sem.
1588 */
1589 static void __trace_remove_event_call(struct ftrace_event_call *call)
1590 {
1591 event_remove(call);
1592 trace_destroy_fields(call);
1593 destroy_preds(call);
1594 }
1595
1596 /* Remove an event_call */
1597 void trace_remove_event_call(struct ftrace_event_call *call)
1598 {
1599 mutex_lock(&event_mutex);
1600 down_write(&trace_event_sem);
1601 __trace_remove_event_call(call);
1602 up_write(&trace_event_sem);
1603 mutex_unlock(&event_mutex);
1604 }
1605
1606 #define for_each_event(event, start, end) \
1607 for (event = start; \
1608 (unsigned long)event < (unsigned long)end; \
1609 event++)
1610
1611 #ifdef CONFIG_MODULES
1612
1613 static LIST_HEAD(ftrace_module_file_list);
1614
1615 /*
1616 * Modules must own their file_operations to keep up with
1617 * reference counting.
1618 */
1619 struct ftrace_module_file_ops {
1620 struct list_head list;
1621 struct module *mod;
1622 struct file_operations id;
1623 struct file_operations enable;
1624 struct file_operations format;
1625 struct file_operations filter;
1626 };
1627
1628 static struct ftrace_module_file_ops *
1629 find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
1630 {
1631 /*
1632 * As event_calls are added in groups by module,
1633 * when we find one file_ops, we don't need to search for
1634 * each call in that module, as the rest should be the
1635 * same. Only search for a new one if the last one did
1636 * not match.
1637 */
1638 if (file_ops && mod == file_ops->mod)
1639 return file_ops;
1640
1641 list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
1642 if (file_ops->mod == mod)
1643 return file_ops;
1644 }
1645 return NULL;
1646 }
1647
1648 static struct ftrace_module_file_ops *
1649 trace_create_file_ops(struct module *mod)
1650 {
1651 struct ftrace_module_file_ops *file_ops;
1652
1653 /*
1654 * This is a bit of a PITA. To allow for correct reference
1655 * counting, modules must "own" their file_operations.
1656 * To do this, we allocate the file operations that will be
1657 * used in the event directory.
1658 */
1659
1660 file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
1661 if (!file_ops)
1662 return NULL;
1663
1664 file_ops->mod = mod;
1665
1666 file_ops->id = ftrace_event_id_fops;
1667 file_ops->id.owner = mod;
1668
1669 file_ops->enable = ftrace_enable_fops;
1670 file_ops->enable.owner = mod;
1671
1672 file_ops->filter = ftrace_event_filter_fops;
1673 file_ops->filter.owner = mod;
1674
1675 file_ops->format = ftrace_event_format_fops;
1676 file_ops->format.owner = mod;
1677
1678 list_add(&file_ops->list, &ftrace_module_file_list);
1679
1680 return file_ops;
1681 }
1682
1683 static void trace_module_add_events(struct module *mod)
1684 {
1685 struct ftrace_module_file_ops *file_ops = NULL;
1686 struct ftrace_event_call **call, **start, **end;
1687
1688 start = mod->trace_events;
1689 end = mod->trace_events + mod->num_trace_events;
1690
1691 if (start == end)
1692 return;
1693
1694 file_ops = trace_create_file_ops(mod);
1695 if (!file_ops)
1696 return;
1697
1698 for_each_event(call, start, end) {
1699 __register_event(*call, mod);
1700 __add_event_to_tracers(*call, file_ops);
1701 }
1702 }
1703
1704 static void trace_module_remove_events(struct module *mod)
1705 {
1706 struct ftrace_module_file_ops *file_ops;
1707 struct ftrace_event_call *call, *p;
1708 bool clear_trace = false;
1709
1710 down_write(&trace_event_sem);
1711 list_for_each_entry_safe(call, p, &ftrace_events, list) {
1712 if (call->mod == mod) {
1713 if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
1714 clear_trace = true;
1715 __trace_remove_event_call(call);
1716 }
1717 }
1718
1719 /* Now free the file_operations */
1720 list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
1721 if (file_ops->mod == mod)
1722 break;
1723 }
1724 if (&file_ops->list != &ftrace_module_file_list) {
1725 list_del(&file_ops->list);
1726 kfree(file_ops);
1727 }
1728 up_write(&trace_event_sem);
1729
1730 /*
1731 * It is safest to reset the ring buffer if the module being unloaded
1732 * registered any events that were used. The only worry is if
1733 * a new module gets loaded, and takes on the same id as the events
1734 * of this module. When printing out the buffer, traced events left
1735 * over from this module may be passed to the new module events and
1736 * unexpected results may occur.
1737 */
1738 if (clear_trace)
1739 tracing_reset_all_online_cpus();
1740 }
1741
1742 static int trace_module_notify(struct notifier_block *self,
1743 unsigned long val, void *data)
1744 {
1745 struct module *mod = data;
1746
1747 mutex_lock(&event_mutex);
1748 switch (val) {
1749 case MODULE_STATE_COMING:
1750 trace_module_add_events(mod);
1751 break;
1752 case MODULE_STATE_GOING:
1753 trace_module_remove_events(mod);
1754 break;
1755 }
1756 mutex_unlock(&event_mutex);
1757
1758 return 0;
1759 }
1760
1761 static int
1762 __trace_add_new_mod_event(struct ftrace_event_call *call,
1763 struct trace_array *tr,
1764 struct ftrace_module_file_ops *file_ops)
1765 {
1766 return __trace_add_new_event(call, tr,
1767 &file_ops->id, &file_ops->enable,
1768 &file_ops->filter, &file_ops->format);
1769 }
1770
1771 #else
1772 static inline struct ftrace_module_file_ops *
1773 find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
1774 {
1775 return NULL;
1776 }
1777 static inline int trace_module_notify(struct notifier_block *self,
1778 unsigned long val, void *data)
1779 {
1780 return 0;
1781 }
1782 static inline int
1783 __trace_add_new_mod_event(struct ftrace_event_call *call,
1784 struct trace_array *tr,
1785 struct ftrace_module_file_ops *file_ops)
1786 {
1787 return -ENODEV;
1788 }
1789 #endif /* CONFIG_MODULES */
1790
1791 /* Create a new event directory structure for a trace directory. */
1792 static void
1793 __trace_add_event_dirs(struct trace_array *tr)
1794 {
1795 struct ftrace_module_file_ops *file_ops = NULL;
1796 struct ftrace_event_call *call;
1797 int ret;
1798
1799 list_for_each_entry(call, &ftrace_events, list) {
1800 if (call->mod) {
1801 /*
1802 * Directories for events by modules need to
1803 * keep module ref counts when opened (as we don't
1804 * want the module to disappear when reading one
1805 * of these files). The file_ops keep account of
1806 * the module ref count.
1807 */
1808 file_ops = find_ftrace_file_ops(file_ops, call->mod);
1809 if (!file_ops)
1810 continue; /* Warn? */
1811 ret = __trace_add_new_mod_event(call, tr, file_ops);
1812 if (ret < 0)
1813 pr_warning("Could not create directory for event %s\n",
1814 call->name);
1815 continue;
1816 }
1817 ret = __trace_add_new_event(call, tr,
1818 &ftrace_event_id_fops,
1819 &ftrace_enable_fops,
1820 &ftrace_event_filter_fops,
1821 &ftrace_event_format_fops);
1822 if (ret < 0)
1823 pr_warning("Could not create directory for event %s\n",
1824 call->name);
1825 }
1826 }
1827
1828 #ifdef CONFIG_DYNAMIC_FTRACE
1829
1830 /* Avoid typos */
1831 #define ENABLE_EVENT_STR "enable_event"
1832 #define DISABLE_EVENT_STR "disable_event"
1833
1834 struct event_probe_data {
1835 struct ftrace_event_file *file;
1836 unsigned long count;
1837 int ref;
1838 bool enable;
1839 };
1840
1841 static struct ftrace_event_file *
1842 find_event_file(struct trace_array *tr, const char *system, const char *event)
1843 {
1844 struct ftrace_event_file *file;
1845 struct ftrace_event_call *call;
1846
1847 list_for_each_entry(file, &tr->events, list) {
1848
1849 call = file->event_call;
1850
1851 if (!call->name || !call->class || !call->class->reg)
1852 continue;
1853
1854 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
1855 continue;
1856
1857 if (strcmp(event, call->name) == 0 &&
1858 strcmp(system, call->class->system) == 0)
1859 return file;
1860 }
1861 return NULL;
1862 }
1863
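/*
 * Each callback below receives a pointer to its per-ip data slot (hence
 * the double pointer): init/free take and drop references on the shared
 * event_probe_data that register_ftrace_function_probe() stored there.
 */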
1864 static void
1865 event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
1866 {
1867 struct event_probe_data **pdata = (struct event_probe_data **)_data;
1868 struct event_probe_data *data = *pdata;
1869
1870 if (!data)
1871 return;
1872
1873 if (data->enable)
1874 clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
1875 else
1876 set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
1877 }
1878
1879 static void
1880 event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
1881 {
1882 struct event_probe_data **pdata = (struct event_probe_data **)_data;
1883 struct event_probe_data *data = *pdata;
1884
1885 if (!data)
1886 return;
1887
1888 if (!data->count)
1889 return;
1890
1891 /* Skip if the event is in a state we want to switch to */
1892 if (data->enable == !(data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
1893 return;
1894
1895 if (data->count != -1)
1896 (data->count)--;
1897
1898 event_enable_probe(ip, parent_ip, _data);
1899 }
1900
1901 static int
1902 event_enable_print(struct seq_file *m, unsigned long ip,
1903 struct ftrace_probe_ops *ops, void *_data)
1904 {
1905 struct event_probe_data *data = _data;
1906
1907 seq_printf(m, "%ps:", (void *)ip);
1908
1909 seq_printf(m, "%s:%s:%s",
1910 data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
1911 data->file->event_call->class->system,
1912 data->file->event_call->name);
1913
1914 if (data->count == -1)
1915 seq_printf(m, ":unlimited\n");
1916 else
1917 seq_printf(m, ":count=%ld\n", data->count);
1918
1919 return 0;
1920 }
1921
1922 static int
1923 event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip,
1924 void **_data)
1925 {
1926 struct event_probe_data **pdata = (struct event_probe_data **)_data;
1927 struct event_probe_data *data = *pdata;
1928
1929 data->ref++;
1930 return 0;
1931 }
1932
1933 static void
1934 event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip,
1935 void **_data)
1936 {
1937 struct event_probe_data **pdata = (struct event_probe_data **)_data;
1938 struct event_probe_data *data = *pdata;
1939
1940 if (WARN_ON_ONCE(data->ref <= 0))
1941 return;
1942
1943 data->ref--;
1944 if (!data->ref) {
1945 /* Remove the SOFT_MODE flag */
1946 __ftrace_event_enable_disable(data->file, 0, 1);
1947 module_put(data->file->event_call->mod);
1948 kfree(data);
1949 }
1950 *pdata = NULL;
1951 }
1952
1953 static struct ftrace_probe_ops event_enable_probe_ops = {
1954 .func = event_enable_probe,
1955 .print = event_enable_print,
1956 .init = event_enable_init,
1957 .free = event_enable_free,
1958 };
1959
1960 static struct ftrace_probe_ops event_enable_count_probe_ops = {
1961 .func = event_enable_count_probe,
1962 .print = event_enable_print,
1963 .init = event_enable_init,
1964 .free = event_enable_free,
1965 };
1966
1967 static struct ftrace_probe_ops event_disable_probe_ops = {
1968 .func = event_enable_probe,
1969 .print = event_enable_print,
1970 .init = event_enable_init,
1971 .free = event_enable_free,
1972 };
1973
1974 static struct ftrace_probe_ops event_disable_count_probe_ops = {
1975 .func = event_enable_count_probe,
1976 .print = event_enable_print,
1977 .init = event_enable_init,
1978 .free = event_enable_free,
1979 };
1980
1981 static int
1982 event_enable_func(struct ftrace_hash *hash,
1983 char *glob, char *cmd, char *param, int enabled)
1984 {
1985 struct trace_array *tr = top_trace_array();
1986 struct ftrace_event_file *file;
1987 struct ftrace_probe_ops *ops;
1988 struct event_probe_data *data;
1989 const char *system;
1990 const char *event;
1991 char *number;
1992 bool enable;
1993 int ret;
1994
1995 /* hash funcs only work with set_ftrace_filter */
1996 if (!enabled)
1997 return -EINVAL;
1998
1999 if (!param)
2000 return -EINVAL;
2001
2002 system = strsep(&param, ":");
2003 if (!param)
2004 return -EINVAL;
2005
2006 event = strsep(&param, ":");
2007
2008 mutex_lock(&event_mutex);
2009
2010 ret = -EINVAL;
2011 file = find_event_file(tr, system, event);
2012 if (!file)
2013 goto out;
2014
2015 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
2016
2017 if (enable)
2018 ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
2019 else
2020 ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
2021
2022 if (glob[0] == '!') {
2023 unregister_ftrace_function_probe_func(glob+1, ops);
2024 ret = 0;
2025 goto out;
2026 }
2027
2028 ret = -ENOMEM;
2029 data = kzalloc(sizeof(*data), GFP_KERNEL);
2030 if (!data)
2031 goto out;
2032
2033 data->enable = enable;
2034 data->count = -1;
2035 data->file = file;
2036
2037 if (!param)
2038 goto out_reg;
2039
2040 number = strsep(&param, ":");
2041
2042 ret = -EINVAL;
2043 if (!strlen(number))
2044 goto out_free;
2045
2046 /*
2047 * We use the callback data field (which is a pointer)
2048 * as our counter.
2049 */
2050 ret = kstrtoul(number, 0, &data->count);
2051 if (ret)
2052 goto out_free;
2053
2054 out_reg:
2055 /* Don't let event modules unload while probe registered */
2056 ret = try_module_get(file->event_call->mod);
2057 if (!ret)
2058 goto out_free;
2059
2060 ret = __ftrace_event_enable_disable(file, 1, 1);
2061 if (ret < 0)
2062 goto out_put;
2063 ret = register_ftrace_function_probe(glob, ops, data);
2064 /*
2065 * The above returns on success the # of functions enabled,
2066 * but if it didn't find any functions it returns zero.
2067 * Consider no functions a failure too.
2068 */
2069 if (!ret) {
2070 ret = -ENOENT;
2071 goto out_disable;
2072 } else if (ret < 0)
2073 goto out_disable;
2074 /* Just return zero, not the number of enabled functions */
2075 ret = 0;
2076 out:
2077 mutex_unlock(&event_mutex);
2078 return ret;
2079
2080 out_disable:
2081 __ftrace_event_enable_disable(file, 0, 1);
2082 out_put:
2083 module_put(file->event_call->mod);
2084 out_free:
2085 kfree(data);
2086 goto out;
2087 }
2088
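/*
 * Example usage from user space (illustrative):
 *
 *	# echo 'schedule:enable_event:sched:sched_switch:2' > \
 *			/sys/kernel/debug/tracing/set_ftrace_filter
 *
 * arms a probe on schedule() that soft-enables sched:sched_switch for its
 * next two hits; prefixing the line with '!' removes the probe again.
 */
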
2089 static struct ftrace_func_command event_enable_cmd = {
2090 .name = ENABLE_EVENT_STR,
2091 .func = event_enable_func,
2092 };
2093
2094 static struct ftrace_func_command event_disable_cmd = {
2095 .name = DISABLE_EVENT_STR,
2096 .func = event_enable_func,
2097 };
2098
2099 static __init int register_event_cmds(void)
2100 {
2101 int ret;
2102
2103 ret = register_ftrace_command(&event_enable_cmd);
2104 if (WARN_ON(ret < 0))
2105 return ret;
2106 ret = register_ftrace_command(&event_disable_cmd);
2107 if (WARN_ON(ret < 0))
2108 unregister_ftrace_command(&event_enable_cmd);
2109 return ret;
2110 }
2111 #else
2112 static inline int register_event_cmds(void) { return 0; }
2113 #endif /* CONFIG_DYNAMIC_FTRACE */
2114
2115 /*
2116 * The top level array has already had its ftrace_event_file
2117 * descriptors created in order to allow for early events to
2118 * be recorded. This function is called after the debugfs has been
2119 * initialized, and we now have to create the files associated
2120 * to the events.
2121 */
2122 static __init void
2123 __trace_early_add_event_dirs(struct trace_array *tr)
2124 {
2125 struct ftrace_event_file *file;
2126 int ret;
2127
2128
2129 list_for_each_entry(file, &tr->events, list) {
2130 ret = event_create_dir(tr->event_dir, file,
2131 &ftrace_event_id_fops,
2132 &ftrace_enable_fops,
2133 &ftrace_event_filter_fops,
2134 &ftrace_event_format_fops);
2135 if (ret < 0)
2136 pr_warning("Could not create directory for event %s\n",
2137 file->event_call->name);
2138 }
2139 }
2140
2141 /*
2142 * For early boot up, the top trace array needs to have
2143 * a list of events that can be enabled. This must be done before
2144 * the filesystem is set up in order to allow events to be traced
2145 * early.
2146 */
2147 static __init void
2148 __trace_early_add_events(struct trace_array *tr)
2149 {
2150 struct ftrace_event_call *call;
2151 int ret;
2152
2153 list_for_each_entry(call, &ftrace_events, list) {
2154 /* Early boot up should not have any modules loaded */
2155 if (WARN_ON_ONCE(call->mod))
2156 continue;
2157
2158 ret = __trace_early_add_new_event(call, tr);
2159 if (ret < 0)
2160 pr_warning("Could not create early event %s\n",
2161 call->name);
2162 }
2163 }
2164
2165 /* Remove the event directory structure for a trace directory. */
2166 static void
2167 __trace_remove_event_dirs(struct trace_array *tr)
2168 {
2169 struct ftrace_event_file *file, *next;
2170
2171 list_for_each_entry_safe(file, next, &tr->events, list) {
2172 list_del(&file->list);
2173 debugfs_remove_recursive(file->dir);
2174 remove_subsystem(file->system);
2175 kmem_cache_free(file_cachep, file);
2176 }
2177 }
2178
2179 static void
2180 __add_event_to_tracers(struct ftrace_event_call *call,
2181 struct ftrace_module_file_ops *file_ops)
2182 {
2183 struct trace_array *tr;
2184
2185 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2186 if (file_ops)
2187 __trace_add_new_mod_event(call, tr, file_ops);
2188 else
2189 __trace_add_new_event(call, tr,
2190 &ftrace_event_id_fops,
2191 &ftrace_enable_fops,
2192 &ftrace_event_filter_fops,
2193 &ftrace_event_format_fops);
2194 }
2195 }
2196
2197 static struct notifier_block trace_module_nb = {
2198 .notifier_call = trace_module_notify,
2199 .priority = 0,
2200 };
2201
2202 extern struct ftrace_event_call *__start_ftrace_events[];
2203 extern struct ftrace_event_call *__stop_ftrace_events[];
2204
2205 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
2206
2207 static __init int setup_trace_event(char *str)
2208 {
2209 strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
2210 ring_buffer_expanded = true;
2211 tracing_selftest_disabled = true;
2212
2213 return 1;
2214 }
2215 __setup("trace_event=", setup_trace_event);
2216
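/*
 * The __setup() hook above runs while the kernel command line is parsed,
 * long before debugfs exists, so the requested events are only stashed
 * in bootup_event_buf here and applied later by event_trace_enable().
 * Expanding the ring buffer and disabling the self tests keeps the early
 * trace data intact. Example usage on the kernel command line (the event
 * names are illustrative):
 *
 *	trace_event=sched:sched_switch,irq:irq_handler_entry
 */
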
2217 /* Expects to have event_mutex held when called */
2218 static int
2219 create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
2220 {
2221 struct dentry *d_events;
2222 struct dentry *entry;
2223
2224 entry = debugfs_create_file("set_event", 0644, parent,
2225 tr, &ftrace_set_event_fops);
2226 if (!entry) {
2227 pr_warning("Could not create debugfs 'set_event' entry\n");
2228 return -ENOMEM;
2229 }
2230
2231 d_events = debugfs_create_dir("events", parent);
2232 if (!d_events) {
2233 pr_warning("Could not create debugfs 'events' directory\n");
2234 return -ENOMEM;
2235 }
2236
2237 /* ring buffer internal formats */
2238 trace_create_file("header_page", 0444, d_events,
2239 ring_buffer_print_page_header,
2240 &ftrace_show_header_fops);
2241
2242 trace_create_file("header_event", 0444, d_events,
2243 ring_buffer_print_entry_header,
2244 &ftrace_show_header_fops);
2245
2246 trace_create_file("enable", 0644, d_events,
2247 tr, &ftrace_tr_enable_fops);
2248
2249 tr->event_dir = d_events;
2250
2251 return 0;
2252 }
2253
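/*
 * After create_event_toplevel_files() succeeds, the per-instance debugfs
 * layout under @parent is expected to look roughly like this (the
 * per-event directories are filled in later by the *_add_event_dirs
 * helpers):
 *
 *	parent/
 *	    set_event		rw: enable/disable events by name
 *	    events/
 *	        header_page	ro: ring buffer page format
 *	        header_event	ro: ring buffer event format
 *	        enable		rw: switch for every event at once
 */
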
2254 /**
2255 * event_trace_add_tracer - add an instance of a trace_array to events
2256 * @parent: The parent dentry to place the files/directories for events in
2257 * @tr: The trace array associated with these events
2258 *
2259 * When a new instance is created, it needs to set up its events
2260 * directory, as well as other files associated with events. It also
2261 * creates the event hierarchy in the @parent/events directory.
2262 *
2263 * Returns 0 on success.
2264 */
2265 int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
2266 {
2267 int ret;
2268
2269 mutex_lock(&event_mutex);
2270
2271 ret = create_event_toplevel_files(parent, tr);
2272 if (ret)
2273 goto out_unlock;
2274
2275 down_write(&trace_event_sem);
2276 __trace_add_event_dirs(tr);
2277 up_write(&trace_event_sem);
2278
2279 out_unlock:
2280 mutex_unlock(&event_mutex);
2281
2282 return ret;
2283 }
2284
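/*
 * Note the lock ordering used above and in the teardown path below:
 * event_mutex is always taken first, then trace_event_sem is taken for
 * write while the directory tree is built or torn down. Keeping that
 * order consistent in every caller is what prevents an ABBA deadlock
 * between the two locks.
 */
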
2285 /*
2286 * The top trace array already had its ftrace_event_file descriptors
2287 * created. Now the debugfs files themselves need to be created.
2288 */
2289 static __init int
2290 early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
2291 {
2292 int ret;
2293
2294 mutex_lock(&event_mutex);
2295
2296 ret = create_event_toplevel_files(parent, tr);
2297 if (ret)
2298 goto out_unlock;
2299
2300 down_write(&trace_event_sem);
2301 __trace_early_add_event_dirs(tr);
2302 up_write(&trace_event_sem);
2303
2304 out_unlock:
2305 mutex_unlock(&event_mutex);
2306
2307 return ret;
2308 }
2309
2310 int event_trace_del_tracer(struct trace_array *tr)
2311 {
2312 /* Disable any running events */
2313 __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
2314
2315 mutex_lock(&event_mutex);
2316
2317 down_write(&trace_event_sem);
2318 __trace_remove_event_dirs(tr);
2319 debugfs_remove_recursive(tr->event_dir);
2320 up_write(&trace_event_sem);
2321
2322 tr->event_dir = NULL;
2323
2324 mutex_unlock(&event_mutex);
2325
2326 return 0;
2327 }
2328
2329 static __init int event_trace_memsetup(void)
2330 {
2331 field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
2332 file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC);
2333 return 0;
2334 }
2335
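/*
 * KMEM_CACHE() above declares a slab cache sized and aligned for the
 * given struct type; SLAB_PANIC makes boot fail loudly if the cache
 * cannot be created, so the allocation sites never need to check for a
 * NULL cache. Allocation then pairs with freeing as in the sketch below
 * (the variable is illustrative):
 *
 *	struct ftrace_event_file *file;
 *
 *	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
 *	if (!file)
 *		return NULL;
 *	...
 *	kmem_cache_free(file_cachep, file);
 */
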
2336 static __init int event_trace_enable(void)
2337 {
2338 struct trace_array *tr = top_trace_array();
2339 struct ftrace_event_call **iter, *call;
2340 char *buf = bootup_event_buf;
2341 char *token;
2342 int ret;
2343
2344 for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
2346 call = *iter;
2347 ret = event_init(call);
2348 if (!ret)
2349 list_add(&call->list, &ftrace_events);
2350 }
2351
2352 /*
2353 * We need the top trace array to have a working set of trace
2354 * points at early init, before the debug files and directories
2355 * are created. Create the file entries now, and attach them
2356 * to the actual file dentries later.
2357 */
2358 __trace_early_add_events(tr);
2359
2360 while (true) {
2361 token = strsep(&buf, ",");
2362
2363 if (!token)
2364 break;
2365 if (!*token)
2366 continue;
2367
2368 ret = ftrace_set_clr_event(tr, token, 1);
2369 if (ret)
2370 pr_warn("Failed to enable trace event: %s\n", token);
2371 }
2372
2373 trace_printk_start_comm();
2374
2375 register_event_cmds();
2376
2377 return 0;
2378 }
2379
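/*
 * The parsing loop above relies on strsep() semantics: each call
 * consumes one comma separated token from bootup_event_buf, returns an
 * empty string for consecutive commas (hence the !*token check), and
 * returns NULL once the buffer is exhausted. The same loop in
 * isolation (handle() is a hypothetical consumer):
 *
 *	char buf[] = "sched:sched_switch,,irq:*";
 *	char *p = buf, *tok;
 *
 *	while ((tok = strsep(&p, ",")) != NULL) {
 *		if (!*tok)
 *			continue;	/* skip empty tokens */
 *		handle(tok);
 *	}
 */
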
2380 static __init int event_trace_init(void)
2381 {
2382 struct trace_array *tr;
2383 struct dentry *d_tracer;
2384 struct dentry *entry;
2385 int ret;
2386
2387 tr = top_trace_array();
2388
2389 d_tracer = tracing_init_dentry();
2390 if (!d_tracer)
2391 return 0;
2392
2393 entry = debugfs_create_file("available_events", 0444, d_tracer,
2394 tr, &ftrace_avail_fops);
2395 if (!entry)
2396 pr_warning("Could not create debugfs "
2397 "'available_events' entry\n");
2398
2399 if (trace_define_common_fields())
2400 pr_warning("tracing: Failed to allocate common fields\n");
2401
2402 ret = early_event_add_tracer(d_tracer, tr);
2403 if (ret)
2404 return ret;
2405
2406 ret = register_module_notifier(&trace_module_nb);
2407 if (ret)
2408 pr_warning("Failed to register trace events module notifier\n");
2409
2410 return 0;
2411 }
2412 early_initcall(event_trace_memsetup);
2413 core_initcall(event_trace_enable);
2414 fs_initcall(event_trace_init);
2415
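/*
 * The three initcalls above are deliberately staged: the slab caches
 * must exist before any event file is allocated (early_initcall), the
 * events themselves must be usable before debugfs is up so boot time
 * tracing works (core_initcall), and the debugfs entries can only be
 * created once the file system code has initialized (fs_initcall).
 * Initcall levels run in ascending order, so this sequencing holds.
 */
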
2416 #ifdef CONFIG_FTRACE_STARTUP_TEST
2417
2418 static DEFINE_SPINLOCK(test_spinlock);
2419 static DEFINE_SPINLOCK(test_spinlock_irq);
2420 static DEFINE_MUTEX(test_mutex);
2421
2422 static __init void test_work(struct work_struct *dummy)
2423 {
2424 spin_lock(&test_spinlock);
2425 spin_lock_irq(&test_spinlock_irq);
2426 udelay(1);
2427 spin_unlock_irq(&test_spinlock_irq);
2428 spin_unlock(&test_spinlock);
2429
2430 mutex_lock(&test_mutex);
2431 msleep(1);
2432 mutex_unlock(&test_mutex);
2433 }
2434
2435 static __init int event_test_thread(void *unused)
2436 {
2437 void *test_malloc;
2438
2439 test_malloc = kmalloc(1234, GFP_KERNEL);
2440 if (!test_malloc)
2441 pr_info("failed to kmalloc\n");
2442
2443 schedule_on_each_cpu(test_work);
2444
2445 kfree(test_malloc);
2446
2447 set_current_state(TASK_INTERRUPTIBLE);
2448 while (!kthread_should_stop())
2449 schedule();
2450
2451 return 0;
2452 }
2453
2454 /*
2455 * Do various things that may trigger events.
2456 */
2457 static __init void event_test_stuff(void)
2458 {
2459 struct task_struct *test_thread;
2460
2461 test_thread = kthread_run(event_test_thread, NULL, "test-events");
2462 msleep(1);
2463 kthread_stop(test_thread);
2464 }
2465
2466 /*
2467 * For every trace event defined, we will test each trace point separately,
2468 * and then by groups, and finally all trace points.
2469 */
2470 static __init void event_trace_self_tests(void)
2471 {
2472 struct ftrace_subsystem_dir *dir;
2473 struct ftrace_event_file *file;
2474 struct ftrace_event_call *call;
2475 struct event_subsystem *system;
2476 struct trace_array *tr;
2477 int ret;
2478
2479 tr = top_trace_array();
2480
2481 pr_info("Running tests on trace events:\n");
2482
2483 list_for_each_entry(file, &tr->events, list) {
2484
2485 call = file->event_call;
2486
2487 /* Only test those that have a probe */
2488 if (!call->class || !call->class->probe)
2489 continue;
2490
2491 /*
2492 * Testing syscall events here is pretty useless, but
2493 * we still do it if configured. It is time consuming, though.
2494 * What we really need is a user thread to perform the
2495 * syscalls as we test.
2496 */
2497 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
2498 if (call->class->system &&
2499 strcmp(call->class->system, "syscalls") == 0)
2500 continue;
2501 #endif
2502
2503 pr_info("Testing event %s: ", call->name);
2504
2505 /*
2506 * If an event is already enabled, someone is using
2507 * it and the self test should not be on.
2508 */
2509 if (file->flags & FTRACE_EVENT_FL_ENABLED) {
2510 pr_warning("Enabled event during self test!\n");
2511 WARN_ON_ONCE(1);
2512 continue;
2513 }
2514
2515 ftrace_event_enable_disable(file, 1);
2516 event_test_stuff();
2517 ftrace_event_enable_disable(file, 0);
2518
2519 pr_cont("OK\n");
2520 }
2521
2522 /* Now test at the sub system level */
2523
2524 pr_info("Running tests on trace event systems:\n");
2525
2526 list_for_each_entry(dir, &tr->systems, list) {
2527
2528 system = dir->subsystem;
2529
2530 /* the ftrace system is special, skip it */
2531 if (strcmp(system->name, "ftrace") == 0)
2532 continue;
2533
2534 pr_info("Testing event system %s: ", system->name);
2535
2536 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
2537 if (WARN_ON_ONCE(ret)) {
2538 pr_warning("error enabling system %s\n",
2539 system->name);
2540 continue;
2541 }
2542
2543 event_test_stuff();
2544
2545 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
2546 if (WARN_ON_ONCE(ret)) {
2547 pr_warning("error disabling system %s\n",
2548 system->name);
2549 continue;
2550 }
2551
2552 pr_cont("OK\n");
2553 }
2554
2555 /* Test with all events enabled */
2556
2557 pr_info("Running tests on all trace events:\n");
2558 pr_info("Testing all events: ");
2559
2560 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
2561 if (WARN_ON_ONCE(ret)) {
2562 pr_warning("error enabling all events\n");
2563 return;
2564 }
2565
2566 event_test_stuff();
2567
2568 /* Disable all events again */
2569 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
2570 if (WARN_ON_ONCE(ret)) {
2571 pr_warning("error disabling all events\n");
2572 return;
2573 }
2574
2575 pr_cont("OK\n");
2576 }
2577
2578 #ifdef CONFIG_FUNCTION_TRACER
2579
2580 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
2581
2582 static void
2583 function_test_events_call(unsigned long ip, unsigned long parent_ip,
2584 struct ftrace_ops *op, struct pt_regs *pt_regs)
2585 {
2586 struct ring_buffer_event *event;
2587 struct ring_buffer *buffer;
2588 struct ftrace_entry *entry;
2589 unsigned long flags;
2590 long disabled;
2591 int cpu;
2592 int pc;
2593
2594 pc = preempt_count();
2595 preempt_disable_notrace();
2596 cpu = raw_smp_processor_id();
2597 disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
2598
2599 if (disabled != 1)
2600 goto out;
2601
2602 local_save_flags(flags);
2603
2604 event = trace_current_buffer_lock_reserve(&buffer,
2605 TRACE_FN, sizeof(*entry),
2606 flags, pc);
2607 if (!event)
2608 goto out;
2609 entry = ring_buffer_event_data(event);
2610 entry->ip = ip;
2611 entry->parent_ip = parent_ip;
2612
2613 trace_buffer_unlock_commit(buffer, event, flags, pc);
2614
2615 out:
2616 atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
2617 preempt_enable_notrace();
2618 }
2619
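/*
 * The per-CPU ftrace_test_event_disable counter above is a recursion
 * guard: the callback can itself trigger function tracing, so anything
 * past the first nested entry on a CPU bails out before touching the
 * ring buffer. The same idiom in isolation (the_guard is illustrative):
 *
 *	disabled = atomic_inc_return(&per_cpu(the_guard, cpu));
 *	if (disabled != 1)
 *		goto out;	/* re-entered on this CPU, do nothing */
 *	...			/* do the real work exactly once */
 * out:
 *	atomic_dec(&per_cpu(the_guard, cpu));
 */
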
2620 static struct ftrace_ops trace_ops __initdata = {
2622 .func = function_test_events_call,
2623 .flags = FTRACE_OPS_FL_RECURSION_SAFE,
2624 };
2625
2626 static __init void event_trace_self_test_with_function(void)
2627 {
2628 int ret;
2629 ret = register_ftrace_function(&trace_ops);
2630 if (WARN_ON(ret < 0)) {
2631 pr_info("Failed to enable function tracer for event tests\n");
2632 return;
2633 }
2634 pr_info("Running tests again, along with the function tracer\n");
2635 event_trace_self_tests();
2636 unregister_ftrace_function(&trace_ops);
2637 }
2638 #else
2639 static __init void event_trace_self_test_with_function(void)
2640 {
2641 }
2642 #endif
2643
2644 static __init int event_trace_self_tests_init(void)
2645 {
2646 if (!tracing_selftest_disabled) {
2647 event_trace_self_tests();
2648 event_trace_self_test_with_function();
2649 }
2650
2651 return 0;
2652 }
2653
2654 late_initcall(event_trace_self_tests_init);
2655
2656 #endif