Revert "tracing: Move event storage for array from macro to standalone function"
kernel/trace/trace_events.c
1 /*
2 * event tracer
3 *
4 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
5 *
6 * - Added format output of fields of the trace point.
7 * This was based on work by Tom Zanussi <tzanussi@gmail.com>.
8 *
9 */
10
11 #include <linux/workqueue.h>
12 #include <linux/spinlock.h>
13 #include <linux/kthread.h>
14 #include <linux/debugfs.h>
15 #include <linux/uaccess.h>
16 #include <linux/module.h>
17 #include <linux/ctype.h>
18 #include <linux/slab.h>
19 #include <linux/delay.h>
20
21 #include <asm/setup.h>
22
23 #include "trace_output.h"
24
25 #undef TRACE_SYSTEM
26 #define TRACE_SYSTEM "TRACE_SYSTEM"
27
28 DEFINE_MUTEX(event_mutex);
29
30 DEFINE_MUTEX(event_storage_mutex);
31 EXPORT_SYMBOL_GPL(event_storage_mutex);
32
33 char event_storage[EVENT_STORAGE_SIZE];
34 EXPORT_SYMBOL_GPL(event_storage);
35
36 LIST_HEAD(ftrace_events);
37 static LIST_HEAD(ftrace_common_fields);
38
39 #define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)
40
41 static struct kmem_cache *field_cachep;
42 static struct kmem_cache *file_cachep;
43
44 #define SYSTEM_FL_FREE_NAME (1 << 31)
45
46 static inline int system_refcount(struct event_subsystem *system)
47 {
48 return system->ref_count & ~SYSTEM_FL_FREE_NAME;
49 }
50
51 static int system_refcount_inc(struct event_subsystem *system)
52 {
53 return (system->ref_count++) & ~SYSTEM_FL_FREE_NAME;
54 }
55
56 static int system_refcount_dec(struct event_subsystem *system)
57 {
58 return (--system->ref_count) & ~SYSTEM_FL_FREE_NAME;
59 }
60
61 /* Double loops, do not use break, only gotos work */
62 #define do_for_each_event_file(tr, file) \
63 list_for_each_entry(tr, &ftrace_trace_arrays, list) { \
64 list_for_each_entry(file, &tr->events, list)
65
66 #define do_for_each_event_file_safe(tr, file) \
67 list_for_each_entry(tr, &ftrace_trace_arrays, list) { \
68 struct ftrace_event_file *___n; \
69 list_for_each_entry_safe(file, ___n, &tr->events, list)
70
71 #define while_for_each_event_file() \
72 }
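
/*
 * Usage sketch for the double-loop macros above (the helper name is
 * hypothetical; real callers hold event_mutex): a break in the body
 * only finishes the inner loop and moves on to the next trace_array,
 * while a goto is needed to leave both loops.
 */
static __maybe_unused int example_count_event_files(void)
{
	struct trace_array *tr;
	struct ftrace_event_file *file;
	int count = 0;

	do_for_each_event_file(tr, file) {
		count++;
	} while_for_each_event_file();

	return count;
}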
73
74 static struct list_head *
75 trace_get_fields(struct ftrace_event_call *event_call)
76 {
77 if (!event_call->class->get_fields)
78 return &event_call->class->fields;
79 return event_call->class->get_fields(event_call);
80 }
81
82 static struct ftrace_event_field *
83 __find_event_field(struct list_head *head, char *name)
84 {
85 struct ftrace_event_field *field;
86
87 list_for_each_entry(field, head, link) {
88 if (!strcmp(field->name, name))
89 return field;
90 }
91
92 return NULL;
93 }
94
95 struct ftrace_event_field *
96 trace_find_event_field(struct ftrace_event_call *call, char *name)
97 {
98 struct ftrace_event_field *field;
99 struct list_head *head;
100
101 field = __find_event_field(&ftrace_common_fields, name);
102 if (field)
103 return field;
104
105 head = trace_get_fields(call);
106 return __find_event_field(head, name);
107 }
108
109 static int __trace_define_field(struct list_head *head, const char *type,
110 const char *name, int offset, int size,
111 int is_signed, int filter_type)
112 {
113 struct ftrace_event_field *field;
114
115 field = kmem_cache_alloc(field_cachep, GFP_TRACE);
116 if (!field)
117 return -ENOMEM;
118
119 field->name = name;
120 field->type = type;
121
122 if (filter_type == FILTER_OTHER)
123 field->filter_type = filter_assign_type(type);
124 else
125 field->filter_type = filter_type;
126
127 field->offset = offset;
128 field->size = size;
129 field->is_signed = is_signed;
130
131 list_add(&field->link, head);
132
133 return 0;
134 }
135
136 int trace_define_field(struct ftrace_event_call *call, const char *type,
137 const char *name, int offset, int size, int is_signed,
138 int filter_type)
139 {
140 struct list_head *head;
141
142 if (WARN_ON(!call->class))
143 return 0;
144
145 head = trace_get_fields(call);
146 return __trace_define_field(head, type, name, offset, size,
147 is_signed, filter_type);
148 }
149 EXPORT_SYMBOL_GPL(trace_define_field);
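
/*
 * Sketch of a class->define_fields() callback built on
 * trace_define_field(); the entry layout and field names here are
 * hypothetical.
 */
struct example_trace_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	int			my_val;
};

static __maybe_unused int example_define_fields(struct ftrace_event_call *call)
{
	int ret;

	ret = trace_define_field(call, "unsigned long", "ip",
				 offsetof(struct example_trace_entry, ip),
				 sizeof(unsigned long), 0, FILTER_OTHER);
	if (ret)
		return ret;

	return trace_define_field(call, "int", "my_val",
				  offsetof(struct example_trace_entry, my_val),
				  sizeof(int), 1, FILTER_OTHER);
}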
150
151 #define __common_field(type, item) \
152 ret = __trace_define_field(&ftrace_common_fields, #type, \
153 "common_" #item, \
154 offsetof(typeof(ent), item), \
155 sizeof(ent.item), \
156 is_signed_type(type), FILTER_OTHER); \
157 if (ret) \
158 return ret;
159
160 static int trace_define_common_fields(void)
161 {
162 int ret;
163 struct trace_entry ent;
164
165 __common_field(unsigned short, type);
166 __common_field(unsigned char, flags);
167 __common_field(unsigned char, preempt_count);
168 __common_field(int, pid);
169
170 return ret;
171 }
172
173 static void trace_destroy_fields(struct ftrace_event_call *call)
174 {
175 struct ftrace_event_field *field, *next;
176 struct list_head *head;
177
178 head = trace_get_fields(call);
179 list_for_each_entry_safe(field, next, head, link) {
180 list_del(&field->link);
181 kmem_cache_free(field_cachep, field);
182 }
183 }
184
185 int trace_event_raw_init(struct ftrace_event_call *call)
186 {
187 int id;
188
189 id = register_ftrace_event(&call->event);
190 if (!id)
191 return -ENODEV;
192
193 return 0;
194 }
195 EXPORT_SYMBOL_GPL(trace_event_raw_init);
196
197 void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer,
198 struct ftrace_event_file *ftrace_file,
199 unsigned long len)
200 {
201 struct ftrace_event_call *event_call = ftrace_file->event_call;
202
203 local_save_flags(fbuffer->flags);
204 fbuffer->pc = preempt_count();
205 fbuffer->ftrace_file = ftrace_file;
206
207 fbuffer->event =
208 trace_event_buffer_lock_reserve(&fbuffer->buffer, ftrace_file,
209 event_call->event.type, len,
210 fbuffer->flags, fbuffer->pc);
211 if (!fbuffer->event)
212 return NULL;
213
214 fbuffer->entry = ring_buffer_event_data(fbuffer->event);
215 return fbuffer->entry;
216 }
217 EXPORT_SYMBOL_GPL(ftrace_event_buffer_reserve);
218
219 void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer)
220 {
221 event_trigger_unlock_commit(fbuffer->ftrace_file, fbuffer->buffer,
222 fbuffer->event, fbuffer->entry,
223 fbuffer->flags, fbuffer->pc);
224 }
225 EXPORT_SYMBOL_GPL(ftrace_event_buffer_commit);
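
/*
 * Sketch of how a tracepoint probe pairs the two helpers above, in
 * the style of the macro-generated ftrace_raw_event_*() handlers;
 * the probe signature and entry layout are hypothetical.
 */
struct example_probe_entry {
	struct trace_entry	ent;
	unsigned long		value;
};

static __maybe_unused void example_event_probe(void *__data, unsigned long value)
{
	struct ftrace_event_file *ftrace_file = __data;
	struct ftrace_event_buffer fbuffer;
	struct example_probe_entry *entry;

	entry = ftrace_event_buffer_reserve(&fbuffer, ftrace_file,
					    sizeof(*entry));
	if (!entry)
		return;

	entry->value = value;
	ftrace_event_buffer_commit(&fbuffer);
}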
226
227 int ftrace_event_reg(struct ftrace_event_call *call,
228 enum trace_reg type, void *data)
229 {
230 struct ftrace_event_file *file = data;
231
232 switch (type) {
233 case TRACE_REG_REGISTER:
234 return tracepoint_probe_register(call->name,
235 call->class->probe,
236 file);
237 case TRACE_REG_UNREGISTER:
238 tracepoint_probe_unregister(call->name,
239 call->class->probe,
240 file);
241 return 0;
242
243 #ifdef CONFIG_PERF_EVENTS
244 case TRACE_REG_PERF_REGISTER:
245 return tracepoint_probe_register(call->name,
246 call->class->perf_probe,
247 call);
248 case TRACE_REG_PERF_UNREGISTER:
249 tracepoint_probe_unregister(call->name,
250 call->class->perf_probe,
251 call);
252 return 0;
253 case TRACE_REG_PERF_OPEN:
254 case TRACE_REG_PERF_CLOSE:
255 case TRACE_REG_PERF_ADD:
256 case TRACE_REG_PERF_DEL:
257 return 0;
258 #endif
259 }
260 return 0;
261 }
262 EXPORT_SYMBOL_GPL(ftrace_event_reg);
263
264 void trace_event_enable_cmd_record(bool enable)
265 {
266 struct ftrace_event_file *file;
267 struct trace_array *tr;
268
269 mutex_lock(&event_mutex);
270 do_for_each_event_file(tr, file) {
271
272 if (!(file->flags & FTRACE_EVENT_FL_ENABLED))
273 continue;
274
275 if (enable) {
276 tracing_start_cmdline_record();
277 set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
278 } else {
279 tracing_stop_cmdline_record();
280 clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
281 }
282 } while_for_each_event_file();
283 mutex_unlock(&event_mutex);
284 }
285
286 static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
287 int enable, int soft_disable)
288 {
289 struct ftrace_event_call *call = file->event_call;
290 int ret = 0;
291 int disable;
292
293 switch (enable) {
294 case 0:
295 /*
296 * When soft_disable is set and enable is cleared, the sm_ref
297 * reference counter is decremented. If it reaches 0, we want
298 * to clear the SOFT_DISABLED flag but leave the event in the
299 * state that it was. That is, if the event was enabled and
300 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
301 * is set we do not want the event to be enabled before we
302 * clear the bit.
303 *
304 * When soft_disable is not set but the SOFT_MODE flag is,
305 * we do nothing. Do not disable the tracepoint, otherwise
306 * "soft enable"s (clearing the SOFT_DISABLED bit) wont work.
307 */
308 if (soft_disable) {
309 if (atomic_dec_return(&file->sm_ref) > 0)
310 break;
311 disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
312 clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
313 } else
314 disable = !(file->flags & FTRACE_EVENT_FL_SOFT_MODE);
315
316 if (disable && (file->flags & FTRACE_EVENT_FL_ENABLED)) {
317 clear_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
318 if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) {
319 tracing_stop_cmdline_record();
320 clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
321 }
322 call->class->reg(call, TRACE_REG_UNREGISTER, file);
323 }
324 /* If in SOFT_MODE, just set the SOFT_DISABLED_BIT, else clear it */
325 if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
326 set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
327 else
328 clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
329 break;
330 case 1:
331 /*
332 * When soft_disable is set and enable is set, we want to
333 * register the tracepoint for the event, but leave the event
334 * as is. That means, if the event was already enabled, we do
335 * nothing (but set SOFT_MODE). If the event is disabled, we
336 * set SOFT_DISABLED before enabling the event tracepoint, so
337 * it still seems to be disabled.
338 */
339 if (!soft_disable)
340 clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
341 else {
342 if (atomic_inc_return(&file->sm_ref) > 1)
343 break;
344 set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
345 }
346
347 if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
348
349 /* Keep the event disabled, when going to SOFT_MODE. */
350 if (soft_disable)
351 set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
352
353 if (trace_flags & TRACE_ITER_RECORD_CMD) {
354 tracing_start_cmdline_record();
355 set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
356 }
357 ret = call->class->reg(call, TRACE_REG_REGISTER, file);
358 if (ret) {
359 tracing_stop_cmdline_record();
360 pr_info("event trace: Could not enable event "
361 "%s\n", call->name);
362 break;
363 }
364 set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
365
366 /* WAS_ENABLED gets set but never cleared. */
367 call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
368 }
369 break;
370 }
371
372 return ret;
373 }
374
375 int trace_event_enable_disable(struct ftrace_event_file *file,
376 int enable, int soft_disable)
377 {
378 return __ftrace_event_enable_disable(file, enable, soft_disable);
379 }
380
381 static int ftrace_event_enable_disable(struct ftrace_event_file *file,
382 int enable)
383 {
384 return __ftrace_event_enable_disable(file, enable, 0);
385 }
386
387 static void ftrace_clear_events(struct trace_array *tr)
388 {
389 struct ftrace_event_file *file;
390
391 mutex_lock(&event_mutex);
392 list_for_each_entry(file, &tr->events, list) {
393 ftrace_event_enable_disable(file, 0);
394 }
395 mutex_unlock(&event_mutex);
396 }
397
398 static void __put_system(struct event_subsystem *system)
399 {
400 struct event_filter *filter = system->filter;
401
402 WARN_ON_ONCE(system_refcount(system) == 0);
403 if (system_refcount_dec(system))
404 return;
405
406 list_del(&system->list);
407
408 if (filter) {
409 kfree(filter->filter_string);
410 kfree(filter);
411 }
412 if (system->ref_count & SYSTEM_FL_FREE_NAME)
413 kfree(system->name);
414 kfree(system);
415 }
416
417 static void __get_system(struct event_subsystem *system)
418 {
419 WARN_ON_ONCE(system_refcount(system) == 0);
420 system_refcount_inc(system);
421 }
422
423 static void __get_system_dir(struct ftrace_subsystem_dir *dir)
424 {
425 WARN_ON_ONCE(dir->ref_count == 0);
426 dir->ref_count++;
427 __get_system(dir->subsystem);
428 }
429
430 static void __put_system_dir(struct ftrace_subsystem_dir *dir)
431 {
432 WARN_ON_ONCE(dir->ref_count == 0);
433 /* If the subsystem is about to be freed, the dir must be too */
434 WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);
435
436 __put_system(dir->subsystem);
437 if (!--dir->ref_count)
438 kfree(dir);
439 }
440
441 static void put_system(struct ftrace_subsystem_dir *dir)
442 {
443 mutex_lock(&event_mutex);
444 __put_system_dir(dir);
445 mutex_unlock(&event_mutex);
446 }
447
448 static void remove_subsystem(struct ftrace_subsystem_dir *dir)
449 {
450 if (!dir)
451 return;
452
453 if (!--dir->nr_events) {
454 debugfs_remove_recursive(dir->entry);
455 list_del(&dir->list);
456 __put_system_dir(dir);
457 }
458 }
459
460 static void remove_event_file_dir(struct ftrace_event_file *file)
461 {
462 struct dentry *dir = file->dir;
463 struct dentry *child;
464
465 if (dir) {
466 spin_lock(&dir->d_lock); /* probably unneeded */
467 list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) {
468 if (child->d_inode) /* probably unneeded */
469 child->d_inode->i_private = NULL;
470 }
471 spin_unlock(&dir->d_lock);
472
473 debugfs_remove_recursive(dir);
474 }
475
476 list_del(&file->list);
477 remove_subsystem(file->system);
478 kmem_cache_free(file_cachep, file);
479 }
480
481 /*
482 * __ftrace_set_clr_event(tr, NULL, NULL, NULL, set) will set/unset all events.
483 */
484 static int
485 __ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
486 const char *sub, const char *event, int set)
487 {
488 struct ftrace_event_file *file;
489 struct ftrace_event_call *call;
490 int ret = -EINVAL;
491
492 list_for_each_entry(file, &tr->events, list) {
493
494 call = file->event_call;
495
496 if (!call->name || !call->class || !call->class->reg)
497 continue;
498
499 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
500 continue;
501
502 if (match &&
503 strcmp(match, call->name) != 0 &&
504 strcmp(match, call->class->system) != 0)
505 continue;
506
507 if (sub && strcmp(sub, call->class->system) != 0)
508 continue;
509
510 if (event && strcmp(event, call->name) != 0)
511 continue;
512
513 ftrace_event_enable_disable(file, set);
514
515 ret = 0;
516 }
517
518 return ret;
519 }
520
521 static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
522 const char *sub, const char *event, int set)
523 {
524 int ret;
525
526 mutex_lock(&event_mutex);
527 ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
528 mutex_unlock(&event_mutex);
529
530 return ret;
531 }
532
533 static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
534 {
535 char *event = NULL, *sub = NULL, *match;
536
537 /*
538 * The buf format can be <subsystem>:<event-name>
539 * *:<event-name> means any event by that name.
540 * :<event-name> is the same.
541 *
542 * <subsystem>:* means all events in that subsystem
543 * <subsystem>: means the same.
544 *
545 * <name> (no ':') means all events in a subsystem with
546 * the name <name> or any event that matches <name>
547 */
548
549 match = strsep(&buf, ":");
550 if (buf) {
551 sub = match;
552 event = buf;
553 match = NULL;
554
555 if (!strlen(sub) || strcmp(sub, "*") == 0)
556 sub = NULL;
557 if (!strlen(event) || strcmp(event, "*") == 0)
558 event = NULL;
559 }
560
561 return __ftrace_set_clr_event(tr, match, sub, event, set);
562 }
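
/*
 * Illustrative set_event strings accepted by the parser above
 * (event and subsystem names are examples):
 *
 *	echo sched:sched_switch > set_event	# one event
 *	echo 'sched:*' > set_event		# whole subsystem
 *	echo sched_switch > set_event		# match name or subsystem
 *	echo '!sched:sched_switch' >> set_event	# clear one event
 *
 * The leading '!' is stripped by ftrace_event_write() before the
 * string reaches this function.
 */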
563
564 /**
565 * trace_set_clr_event - enable or disable an event
566 * @system: system name to match (NULL for any system)
567 * @event: event name to match (NULL for all events, within system)
568 * @set: 1 to enable, 0 to disable
569 *
570 * This is a way for other parts of the kernel to enable or disable
571 * event recording.
572 *
573 * Returns 0 on success, -EINVAL if the parameters do not match any
574 * registered events.
575 */
576 int trace_set_clr_event(const char *system, const char *event, int set)
577 {
578 struct trace_array *tr = top_trace_array();
579
580 return __ftrace_set_clr_event(tr, NULL, system, event, set);
581 }
582 EXPORT_SYMBOL_GPL(trace_set_clr_event);
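
/*
 * In-kernel usage sketch for trace_set_clr_event(); the subsystem
 * name is illustrative. Passing NULL for both arguments would enable
 * every registered event.
 */
static __maybe_unused int example_enable_sched_events(void)
{
	/* NULL event means every event in the "sched" subsystem */
	return trace_set_clr_event("sched", NULL, 1);
}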
583
584 /* 128 should be much more than enough */
585 #define EVENT_BUF_SIZE 127
586
587 static ssize_t
588 ftrace_event_write(struct file *file, const char __user *ubuf,
589 size_t cnt, loff_t *ppos)
590 {
591 struct trace_parser parser;
592 struct seq_file *m = file->private_data;
593 struct trace_array *tr = m->private;
594 ssize_t read, ret;
595
596 if (!cnt)
597 return 0;
598
599 ret = tracing_update_buffers();
600 if (ret < 0)
601 return ret;
602
603 if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
604 return -ENOMEM;
605
606 read = trace_get_user(&parser, ubuf, cnt, ppos);
607
608 if (read >= 0 && trace_parser_loaded((&parser))) {
609 int set = 1;
610
611 if (*parser.buffer == '!')
612 set = 0;
613
614 parser.buffer[parser.idx] = 0;
615
616 ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
617 if (ret)
618 goto out_put;
619 }
620
621 ret = read;
622
623 out_put:
624 trace_parser_put(&parser);
625
626 return ret;
627 }
628
629 static void *
630 t_next(struct seq_file *m, void *v, loff_t *pos)
631 {
632 struct ftrace_event_file *file = v;
633 struct ftrace_event_call *call;
634 struct trace_array *tr = m->private;
635
636 (*pos)++;
637
638 list_for_each_entry_continue(file, &tr->events, list) {
639 call = file->event_call;
640 /*
641 * The ftrace subsystem is for showing formats only.
642 * Its events cannot be enabled or disabled via the event files.
643 */
644 if (call->class && call->class->reg)
645 return file;
646 }
647
648 return NULL;
649 }
650
651 static void *t_start(struct seq_file *m, loff_t *pos)
652 {
653 struct ftrace_event_file *file;
654 struct trace_array *tr = m->private;
655 loff_t l;
656
657 mutex_lock(&event_mutex);
658
659 file = list_entry(&tr->events, struct ftrace_event_file, list);
660 for (l = 0; l <= *pos; ) {
661 file = t_next(m, file, &l);
662 if (!file)
663 break;
664 }
665 return file;
666 }
667
668 static void *
669 s_next(struct seq_file *m, void *v, loff_t *pos)
670 {
671 struct ftrace_event_file *file = v;
672 struct trace_array *tr = m->private;
673
674 (*pos)++;
675
676 list_for_each_entry_continue(file, &tr->events, list) {
677 if (file->flags & FTRACE_EVENT_FL_ENABLED)
678 return file;
679 }
680
681 return NULL;
682 }
683
684 static void *s_start(struct seq_file *m, loff_t *pos)
685 {
686 struct ftrace_event_file *file;
687 struct trace_array *tr = m->private;
688 loff_t l;
689
690 mutex_lock(&event_mutex);
691
692 file = list_entry(&tr->events, struct ftrace_event_file, list);
693 for (l = 0; l <= *pos; ) {
694 file = s_next(m, file, &l);
695 if (!file)
696 break;
697 }
698 return file;
699 }
700
701 static int t_show(struct seq_file *m, void *v)
702 {
703 struct ftrace_event_file *file = v;
704 struct ftrace_event_call *call = file->event_call;
705
706 if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
707 seq_printf(m, "%s:", call->class->system);
708 seq_printf(m, "%s\n", call->name);
709
710 return 0;
711 }
712
713 static void t_stop(struct seq_file *m, void *p)
714 {
715 mutex_unlock(&event_mutex);
716 }
717
718 static ssize_t
719 event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
720 loff_t *ppos)
721 {
722 struct ftrace_event_file *file;
723 unsigned long flags;
724 char buf[4] = "0";
725
726 mutex_lock(&event_mutex);
727 file = event_file_data(filp);
728 if (likely(file))
729 flags = file->flags;
730 mutex_unlock(&event_mutex);
731
732 if (!file)
733 return -ENODEV;
734
735 if (flags & FTRACE_EVENT_FL_ENABLED &&
736 !(flags & FTRACE_EVENT_FL_SOFT_DISABLED))
737 strcpy(buf, "1");
738
739 if (flags & FTRACE_EVENT_FL_SOFT_DISABLED ||
740 flags & FTRACE_EVENT_FL_SOFT_MODE)
741 strcat(buf, "*");
742
743 strcat(buf, "\n");
744
745 return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
746 }
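
/*
 * Read results produced above for an event's "enable" file, by flag
 * state:
 *
 *	"0\n"	event disabled
 *	"1\n"	event enabled
 *	"0*\n"	disabled but in SOFT_MODE (or soft-disabled)
 *	"1*\n"	enabled with a soft user (e.g. a trigger) attached
 */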
747
748 static ssize_t
749 event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
750 loff_t *ppos)
751 {
752 struct ftrace_event_file *file;
753 unsigned long val;
754 int ret;
755
756 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
757 if (ret)
758 return ret;
759
760 ret = tracing_update_buffers();
761 if (ret < 0)
762 return ret;
763
764 switch (val) {
765 case 0:
766 case 1:
767 ret = -ENODEV;
768 mutex_lock(&event_mutex);
769 file = event_file_data(filp);
770 if (likely(file))
771 ret = ftrace_event_enable_disable(file, val);
772 mutex_unlock(&event_mutex);
773 break;
774
775 default:
776 return -EINVAL;
777 }
778
779 *ppos += cnt;
780
781 return ret ? ret : cnt;
782 }
783
784 static ssize_t
785 system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
786 loff_t *ppos)
787 {
788 const char set_to_char[4] = { '?', '0', '1', 'X' };
789 struct ftrace_subsystem_dir *dir = filp->private_data;
790 struct event_subsystem *system = dir->subsystem;
791 struct ftrace_event_call *call;
792 struct ftrace_event_file *file;
793 struct trace_array *tr = dir->tr;
794 char buf[2];
795 int set = 0;
796 int ret;
797
798 mutex_lock(&event_mutex);
799 list_for_each_entry(file, &tr->events, list) {
800 call = file->event_call;
801 if (!call->name || !call->class || !call->class->reg)
802 continue;
803
804 if (system && strcmp(call->class->system, system->name) != 0)
805 continue;
806
807 /*
808 * We need to find out if all the events are set
809 * or if all events are cleared, or if we have
810 * a mixture.
811 */
812 set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED));
813
814 /*
815 * If we have a mixture, no need to look further.
816 */
817 if (set == 3)
818 break;
819 }
820 mutex_unlock(&event_mutex);
821
822 buf[0] = set_to_char[set];
823 buf[1] = '\n';
824
825 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
826
827 return ret;
828 }
829
830 static ssize_t
831 system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
832 loff_t *ppos)
833 {
834 struct ftrace_subsystem_dir *dir = filp->private_data;
835 struct event_subsystem *system = dir->subsystem;
836 const char *name = NULL;
837 unsigned long val;
838 ssize_t ret;
839
840 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
841 if (ret)
842 return ret;
843
844 ret = tracing_update_buffers();
845 if (ret < 0)
846 return ret;
847
848 if (val != 0 && val != 1)
849 return -EINVAL;
850
851 /*
852 * Opening of "enable" adds a ref count to system,
853 * so the name is safe to use.
854 */
855 if (system)
856 name = system->name;
857
858 ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
859 if (ret)
860 goto out;
861
862 ret = cnt;
863
864 out:
865 *ppos += cnt;
866
867 return ret;
868 }
869
870 enum {
871 FORMAT_HEADER = 1,
872 FORMAT_FIELD_SEPERATOR = 2,
873 FORMAT_PRINTFMT = 3,
874 };
875
876 static void *f_next(struct seq_file *m, void *v, loff_t *pos)
877 {
878 struct ftrace_event_call *call = event_file_data(m->private);
879 struct list_head *common_head = &ftrace_common_fields;
880 struct list_head *head = trace_get_fields(call);
881 struct list_head *node = v;
882
883 (*pos)++;
884
885 switch ((unsigned long)v) {
886 case FORMAT_HEADER:
887 node = common_head;
888 break;
889
890 case FORMAT_FIELD_SEPERATOR:
891 node = head;
892 break;
893
894 case FORMAT_PRINTFMT:
895 /* all done */
896 return NULL;
897 }
898
899 node = node->prev;
900 if (node == common_head)
901 return (void *)FORMAT_FIELD_SEPERATOR;
902 else if (node == head)
903 return (void *)FORMAT_PRINTFMT;
904 else
905 return node;
906 }
907
908 static int f_show(struct seq_file *m, void *v)
909 {
910 struct ftrace_event_call *call = event_file_data(m->private);
911 struct ftrace_event_field *field;
912 const char *array_descriptor;
913
914 switch ((unsigned long)v) {
915 case FORMAT_HEADER:
916 seq_printf(m, "name: %s\n", call->name);
917 seq_printf(m, "ID: %d\n", call->event.type);
918 seq_printf(m, "format:\n");
919 return 0;
920
921 case FORMAT_FIELD_SEPERATOR:
922 seq_putc(m, '\n');
923 return 0;
924
925 case FORMAT_PRINTFMT:
926 seq_printf(m, "\nprint fmt: %s\n",
927 call->print_fmt);
928 return 0;
929 }
930
931 field = list_entry(v, struct ftrace_event_field, link);
932 /*
933 * Smartly shows the array type (except dynamic arrays).
934 * Normal:
935 * field:TYPE VAR
936 * If TYPE := TYPE[LEN], it is shown:
937 * field:TYPE VAR[LEN]
938 */
939 array_descriptor = strchr(field->type, '[');
940
941 if (!strncmp(field->type, "__data_loc", 10))
942 array_descriptor = NULL;
943
944 if (!array_descriptor)
945 seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
946 field->type, field->name, field->offset,
947 field->size, !!field->is_signed);
948 else
949 seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
950 (int)(array_descriptor - field->type),
951 field->type, field->name,
952 array_descriptor, field->offset,
953 field->size, !!field->is_signed);
954
955 return 0;
956 }
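
/*
 * Abridged sample of the "format" file that f_show() renders (the ID
 * and fields vary by event):
 *
 *	name: sched_switch
 *	ID: 56
 *	format:
 *		field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *		...
 *		field:char prev_comm[16];	offset:8;	size:16;	signed:1;
 *
 *	print fmt: "prev_comm=%s prev_pid=%d ..."
 */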
957
958 static void *f_start(struct seq_file *m, loff_t *pos)
959 {
960 void *p = (void *)FORMAT_HEADER;
961 loff_t l = 0;
962
963 /* ->stop() is called even if ->start() fails */
964 mutex_lock(&event_mutex);
965 if (!event_file_data(m->private))
966 return ERR_PTR(-ENODEV);
967
968 while (l < *pos && p)
969 p = f_next(m, p, &l);
970
971 return p;
972 }
973
974 static void f_stop(struct seq_file *m, void *p)
975 {
976 mutex_unlock(&event_mutex);
977 }
978
979 static const struct seq_operations trace_format_seq_ops = {
980 .start = f_start,
981 .next = f_next,
982 .stop = f_stop,
983 .show = f_show,
984 };
985
986 static int trace_format_open(struct inode *inode, struct file *file)
987 {
988 struct seq_file *m;
989 int ret;
990
991 ret = seq_open(file, &trace_format_seq_ops);
992 if (ret < 0)
993 return ret;
994
995 m = file->private_data;
996 m->private = file;
997
998 return 0;
999 }
1000
1001 static ssize_t
1002 event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
1003 {
1004 int id = (long)event_file_data(filp);
1005 char buf[32];
1006 int len;
1007
1008 if (*ppos)
1009 return 0;
1010
1011 if (unlikely(!id))
1012 return -ENODEV;
1013
1014 len = sprintf(buf, "%d\n", id);
1015
1016 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
1017 }
1018
1019 static ssize_t
1020 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
1021 loff_t *ppos)
1022 {
1023 struct ftrace_event_file *file;
1024 struct trace_seq *s;
1025 int r = -ENODEV;
1026
1027 if (*ppos)
1028 return 0;
1029
1030 s = kmalloc(sizeof(*s), GFP_KERNEL);
1031
1032 if (!s)
1033 return -ENOMEM;
1034
1035 trace_seq_init(s);
1036
1037 mutex_lock(&event_mutex);
1038 file = event_file_data(filp);
1039 if (file)
1040 print_event_filter(file, s);
1041 mutex_unlock(&event_mutex);
1042
1043 if (file)
1044 r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
1045
1046 kfree(s);
1047
1048 return r;
1049 }
1050
1051 static ssize_t
1052 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
1053 loff_t *ppos)
1054 {
1055 struct ftrace_event_file *file;
1056 char *buf;
1057 int err = -ENODEV;
1058
1059 if (cnt >= PAGE_SIZE)
1060 return -EINVAL;
1061
1062 buf = (char *)__get_free_page(GFP_TEMPORARY);
1063 if (!buf)
1064 return -ENOMEM;
1065
1066 if (copy_from_user(buf, ubuf, cnt)) {
1067 free_page((unsigned long) buf);
1068 return -EFAULT;
1069 }
1070 buf[cnt] = '\0';
1071
1072 mutex_lock(&event_mutex);
1073 file = event_file_data(filp);
1074 if (file)
1075 err = apply_event_filter(file, buf);
1076 mutex_unlock(&event_mutex);
1077
1078 free_page((unsigned long) buf);
1079 if (err < 0)
1080 return err;
1081
1082 *ppos += cnt;
1083
1084 return cnt;
1085 }
1086
1087 static LIST_HEAD(event_subsystems);
1088
1089 static int subsystem_open(struct inode *inode, struct file *filp)
1090 {
1091 struct event_subsystem *system = NULL;
1092 struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */
1093 struct trace_array *tr;
1094 int ret;
1095
1096 if (tracing_is_disabled())
1097 return -ENODEV;
1098
1099 /* Make sure the system still exists */
1100 mutex_lock(&trace_types_lock);
1101 mutex_lock(&event_mutex);
1102 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1103 list_for_each_entry(dir, &tr->systems, list) {
1104 if (dir == inode->i_private) {
1105 /* Don't open systems with no events */
1106 if (dir->nr_events) {
1107 __get_system_dir(dir);
1108 system = dir->subsystem;
1109 }
1110 goto exit_loop;
1111 }
1112 }
1113 }
1114 exit_loop:
1115 mutex_unlock(&event_mutex);
1116 mutex_unlock(&trace_types_lock);
1117
1118 if (!system)
1119 return -ENODEV;
1120
1121 /* Some versions of gcc think dir can be uninitialized here */
1122 WARN_ON(!dir);
1123
1124 /* Still need to increment the ref count of the system */
1125 if (trace_array_get(tr) < 0) {
1126 put_system(dir);
1127 return -ENODEV;
1128 }
1129
1130 ret = tracing_open_generic(inode, filp);
1131 if (ret < 0) {
1132 trace_array_put(tr);
1133 put_system(dir);
1134 }
1135
1136 return ret;
1137 }
1138
1139 static int system_tr_open(struct inode *inode, struct file *filp)
1140 {
1141 struct ftrace_subsystem_dir *dir;
1142 struct trace_array *tr = inode->i_private;
1143 int ret;
1144
1145 if (tracing_is_disabled())
1146 return -ENODEV;
1147
1148 if (trace_array_get(tr) < 0)
1149 return -ENODEV;
1150
1151 /* Make a temporary dir that has no system but points to tr */
1152 dir = kzalloc(sizeof(*dir), GFP_KERNEL);
1153 if (!dir) {
1154 trace_array_put(tr);
1155 return -ENOMEM;
1156 }
1157
1158 dir->tr = tr;
1159
1160 ret = tracing_open_generic(inode, filp);
1161 if (ret < 0) {
1162 trace_array_put(tr);
1163 kfree(dir);
1164 return ret;
1165 }
1166
1167 filp->private_data = dir;
1168
1169 return 0;
1170 }
1171
1172 static int subsystem_release(struct inode *inode, struct file *file)
1173 {
1174 struct ftrace_subsystem_dir *dir = file->private_data;
1175
1176 trace_array_put(dir->tr);
1177
1178 /*
1179 * If dir->subsystem is NULL, then this is a temporary
1180 * descriptor that was made for a trace_array to enable
1181 * all subsystems.
1182 */
1183 if (dir->subsystem)
1184 put_system(dir);
1185 else
1186 kfree(dir);
1187
1188 return 0;
1189 }
1190
1191 static ssize_t
1192 subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
1193 loff_t *ppos)
1194 {
1195 struct ftrace_subsystem_dir *dir = filp->private_data;
1196 struct event_subsystem *system = dir->subsystem;
1197 struct trace_seq *s;
1198 int r;
1199
1200 if (*ppos)
1201 return 0;
1202
1203 s = kmalloc(sizeof(*s), GFP_KERNEL);
1204 if (!s)
1205 return -ENOMEM;
1206
1207 trace_seq_init(s);
1208
1209 print_subsystem_event_filter(system, s);
1210 r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
1211
1212 kfree(s);
1213
1214 return r;
1215 }
1216
1217 static ssize_t
1218 subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
1219 loff_t *ppos)
1220 {
1221 struct ftrace_subsystem_dir *dir = filp->private_data;
1222 char *buf;
1223 int err;
1224
1225 if (cnt >= PAGE_SIZE)
1226 return -EINVAL;
1227
1228 buf = (char *)__get_free_page(GFP_TEMPORARY);
1229 if (!buf)
1230 return -ENOMEM;
1231
1232 if (copy_from_user(buf, ubuf, cnt)) {
1233 free_page((unsigned long) buf);
1234 return -EFAULT;
1235 }
1236 buf[cnt] = '\0';
1237
1238 err = apply_subsystem_event_filter(dir, buf);
1239 free_page((unsigned long) buf);
1240 if (err < 0)
1241 return err;
1242
1243 *ppos += cnt;
1244
1245 return cnt;
1246 }
1247
1248 static ssize_t
1249 show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
1250 {
1251 int (*func)(struct trace_seq *s) = filp->private_data;
1252 struct trace_seq *s;
1253 int r;
1254
1255 if (*ppos)
1256 return 0;
1257
1258 s = kmalloc(sizeof(*s), GFP_KERNEL);
1259 if (!s)
1260 return -ENOMEM;
1261
1262 trace_seq_init(s);
1263
1264 func(s);
1265 r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
1266
1267 kfree(s);
1268
1269 return r;
1270 }
1271
1272 static int ftrace_event_avail_open(struct inode *inode, struct file *file);
1273 static int ftrace_event_set_open(struct inode *inode, struct file *file);
1274 static int ftrace_event_release(struct inode *inode, struct file *file);
1275
1276 static const struct seq_operations show_event_seq_ops = {
1277 .start = t_start,
1278 .next = t_next,
1279 .show = t_show,
1280 .stop = t_stop,
1281 };
1282
1283 static const struct seq_operations show_set_event_seq_ops = {
1284 .start = s_start,
1285 .next = s_next,
1286 .show = t_show,
1287 .stop = t_stop,
1288 };
1289
1290 static const struct file_operations ftrace_avail_fops = {
1291 .open = ftrace_event_avail_open,
1292 .read = seq_read,
1293 .llseek = seq_lseek,
1294 .release = seq_release,
1295 };
1296
1297 static const struct file_operations ftrace_set_event_fops = {
1298 .open = ftrace_event_set_open,
1299 .read = seq_read,
1300 .write = ftrace_event_write,
1301 .llseek = seq_lseek,
1302 .release = ftrace_event_release,
1303 };
1304
1305 static const struct file_operations ftrace_enable_fops = {
1306 .open = tracing_open_generic,
1307 .read = event_enable_read,
1308 .write = event_enable_write,
1309 .llseek = default_llseek,
1310 };
1311
1312 static const struct file_operations ftrace_event_format_fops = {
1313 .open = trace_format_open,
1314 .read = seq_read,
1315 .llseek = seq_lseek,
1316 .release = seq_release,
1317 };
1318
1319 static const struct file_operations ftrace_event_id_fops = {
1320 .read = event_id_read,
1321 .llseek = default_llseek,
1322 };
1323
1324 static const struct file_operations ftrace_event_filter_fops = {
1325 .open = tracing_open_generic,
1326 .read = event_filter_read,
1327 .write = event_filter_write,
1328 .llseek = default_llseek,
1329 };
1330
1331 static const struct file_operations ftrace_subsystem_filter_fops = {
1332 .open = subsystem_open,
1333 .read = subsystem_filter_read,
1334 .write = subsystem_filter_write,
1335 .llseek = default_llseek,
1336 .release = subsystem_release,
1337 };
1338
1339 static const struct file_operations ftrace_system_enable_fops = {
1340 .open = subsystem_open,
1341 .read = system_enable_read,
1342 .write = system_enable_write,
1343 .llseek = default_llseek,
1344 .release = subsystem_release,
1345 };
1346
1347 static const struct file_operations ftrace_tr_enable_fops = {
1348 .open = system_tr_open,
1349 .read = system_enable_read,
1350 .write = system_enable_write,
1351 .llseek = default_llseek,
1352 .release = subsystem_release,
1353 };
1354
1355 static const struct file_operations ftrace_show_header_fops = {
1356 .open = tracing_open_generic,
1357 .read = show_header,
1358 .llseek = default_llseek,
1359 };
1360
1361 static int
1362 ftrace_event_open(struct inode *inode, struct file *file,
1363 const struct seq_operations *seq_ops)
1364 {
1365 struct seq_file *m;
1366 int ret;
1367
1368 ret = seq_open(file, seq_ops);
1369 if (ret < 0)
1370 return ret;
1371 m = file->private_data;
1372 /* copy tr over to seq ops */
1373 m->private = inode->i_private;
1374
1375 return ret;
1376 }
1377
1378 static int ftrace_event_release(struct inode *inode, struct file *file)
1379 {
1380 struct trace_array *tr = inode->i_private;
1381
1382 trace_array_put(tr);
1383
1384 return seq_release(inode, file);
1385 }
1386
1387 static int
1388 ftrace_event_avail_open(struct inode *inode, struct file *file)
1389 {
1390 const struct seq_operations *seq_ops = &show_event_seq_ops;
1391
1392 return ftrace_event_open(inode, file, seq_ops);
1393 }
1394
1395 static int
1396 ftrace_event_set_open(struct inode *inode, struct file *file)
1397 {
1398 const struct seq_operations *seq_ops = &show_set_event_seq_ops;
1399 struct trace_array *tr = inode->i_private;
1400 int ret;
1401
1402 if (trace_array_get(tr) < 0)
1403 return -ENODEV;
1404
1405 if ((file->f_mode & FMODE_WRITE) &&
1406 (file->f_flags & O_TRUNC))
1407 ftrace_clear_events(tr);
1408
1409 ret = ftrace_event_open(inode, file, seq_ops);
1410 if (ret < 0)
1411 trace_array_put(tr);
1412 return ret;
1413 }
1414
1415 static struct event_subsystem *
1416 create_new_subsystem(const char *name)
1417 {
1418 struct event_subsystem *system;
1419
1420 /* need to create new entry */
1421 system = kmalloc(sizeof(*system), GFP_KERNEL);
1422 if (!system)
1423 return NULL;
1424
1425 system->ref_count = 1;
1426
1427 /* Only allocate if dynamic (kprobes and modules) */
1428 if (!core_kernel_data((unsigned long)name)) {
1429 system->ref_count |= SYSTEM_FL_FREE_NAME;
1430 system->name = kstrdup(name, GFP_KERNEL);
1431 if (!system->name)
1432 goto out_free;
1433 } else
1434 system->name = name;
1435
1436 system->filter = NULL;
1437
1438 system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
1439 if (!system->filter)
1440 goto out_free;
1441
1442 list_add(&system->list, &event_subsystems);
1443
1444 return system;
1445
1446 out_free:
1447 if (system->ref_count & SYSTEM_FL_FREE_NAME)
1448 kfree(system->name);
1449 kfree(system);
1450 return NULL;
1451 }
1452
1453 static struct dentry *
1454 event_subsystem_dir(struct trace_array *tr, const char *name,
1455 struct ftrace_event_file *file, struct dentry *parent)
1456 {
1457 struct ftrace_subsystem_dir *dir;
1458 struct event_subsystem *system;
1459 struct dentry *entry;
1460
1461 /* First see if we already created this dir */
1462 list_for_each_entry(dir, &tr->systems, list) {
1463 system = dir->subsystem;
1464 if (strcmp(system->name, name) == 0) {
1465 dir->nr_events++;
1466 file->system = dir;
1467 return dir->entry;
1468 }
1469 }
1470
1471 /* Now see if the system itself exists. */
1472 list_for_each_entry(system, &event_subsystems, list) {
1473 if (strcmp(system->name, name) == 0)
1474 break;
1475 }
1476 /* Reset system variable when not found */
1477 if (&system->list == &event_subsystems)
1478 system = NULL;
1479
1480 dir = kmalloc(sizeof(*dir), GFP_KERNEL);
1481 if (!dir)
1482 goto out_fail;
1483
1484 if (!system) {
1485 system = create_new_subsystem(name);
1486 if (!system)
1487 goto out_free;
1488 } else
1489 __get_system(system);
1490
1491 dir->entry = debugfs_create_dir(name, parent);
1492 if (!dir->entry) {
1493 pr_warning("Failed to create system directory %s\n", name);
1494 __put_system(system);
1495 goto out_free;
1496 }
1497
1498 dir->tr = tr;
1499 dir->ref_count = 1;
1500 dir->nr_events = 1;
1501 dir->subsystem = system;
1502 file->system = dir;
1503
1504 entry = debugfs_create_file("filter", 0644, dir->entry, dir,
1505 &ftrace_subsystem_filter_fops);
1506 if (!entry) {
1507 kfree(system->filter);
1508 system->filter = NULL;
1509 pr_warning("Could not create debugfs '%s/filter' entry\n", name);
1510 }
1511
1512 trace_create_file("enable", 0644, dir->entry, dir,
1513 &ftrace_system_enable_fops);
1514
1515 list_add(&dir->list, &tr->systems);
1516
1517 return dir->entry;
1518
1519 out_free:
1520 kfree(dir);
1521 out_fail:
1522 /* Only print this message if failed on memory allocation */
1523 if (!dir || !system)
1524 pr_warning("No memory to create event subsystem %s\n",
1525 name);
1526 return NULL;
1527 }
1528
1529 static int
1530 event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
1531 {
1532 struct ftrace_event_call *call = file->event_call;
1533 struct trace_array *tr = file->tr;
1534 struct list_head *head;
1535 struct dentry *d_events;
1536 int ret;
1537
1538 /*
1539 * If the trace point header did not define TRACE_SYSTEM
1540 * then the system would be called "TRACE_SYSTEM".
1541 */
1542 if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
1543 d_events = event_subsystem_dir(tr, call->class->system, file, parent);
1544 if (!d_events)
1545 return -ENOMEM;
1546 } else
1547 d_events = parent;
1548
1549 file->dir = debugfs_create_dir(call->name, d_events);
1550 if (!file->dir) {
1551 pr_warning("Could not create debugfs '%s' directory\n",
1552 call->name);
1553 return -1;
1554 }
1555
1556 if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
1557 trace_create_file("enable", 0644, file->dir, file,
1558 &ftrace_enable_fops);
1559
1560 #ifdef CONFIG_PERF_EVENTS
1561 if (call->event.type && call->class->reg)
1562 trace_create_file("id", 0444, file->dir,
1563 (void *)(long)call->event.type,
1564 &ftrace_event_id_fops);
1565 #endif
1566
1567 /*
1568 * Other events may have the same class. Only update
1569 * the fields if they are not already defined.
1570 */
1571 head = trace_get_fields(call);
1572 if (list_empty(head)) {
1573 ret = call->class->define_fields(call);
1574 if (ret < 0) {
1575 pr_warning("Could not initialize trace point"
1576 " events/%s\n", call->name);
1577 return -1;
1578 }
1579 }
1580 trace_create_file("filter", 0644, file->dir, file,
1581 &ftrace_event_filter_fops);
1582
1583 trace_create_file("trigger", 0644, file->dir, file,
1584 &event_trigger_fops);
1585
1586 trace_create_file("format", 0444, file->dir, call,
1587 &ftrace_event_format_fops);
1588
1589 return 0;
1590 }
1591
1592 static void remove_event_from_tracers(struct ftrace_event_call *call)
1593 {
1594 struct ftrace_event_file *file;
1595 struct trace_array *tr;
1596
1597 do_for_each_event_file_safe(tr, file) {
1598 if (file->event_call != call)
1599 continue;
1600
1601 remove_event_file_dir(file);
1602 /*
1603 * The do_for_each_event_file_safe() is
1604 * a double loop. After finding the call for this
1605 * trace_array, we use break to jump to the next
1606 * trace_array.
1607 */
1608 break;
1609 } while_for_each_event_file();
1610 }
1611
1612 static void event_remove(struct ftrace_event_call *call)
1613 {
1614 struct trace_array *tr;
1615 struct ftrace_event_file *file;
1616
1617 do_for_each_event_file(tr, file) {
1618 if (file->event_call != call)
1619 continue;
1620 ftrace_event_enable_disable(file, 0);
1621 destroy_preds(file);
1622 /*
1623 * The do_for_each_event_file() is
1624 * a double loop. After finding the call for this
1625 * trace_array, we use break to jump to the next
1626 * trace_array.
1627 */
1628 break;
1629 } while_for_each_event_file();
1630
1631 if (call->event.funcs)
1632 __unregister_ftrace_event(&call->event);
1633 remove_event_from_tracers(call);
1634 list_del(&call->list);
1635 }
1636
1637 static int event_init(struct ftrace_event_call *call)
1638 {
1639 int ret = 0;
1640
1641 if (WARN_ON(!call->name))
1642 return -EINVAL;
1643
1644 if (call->class->raw_init) {
1645 ret = call->class->raw_init(call);
1646 if (ret < 0 && ret != -ENOSYS)
1647 pr_warn("Could not initialize trace events/%s\n",
1648 call->name);
1649 }
1650
1651 return ret;
1652 }
1653
1654 static int
1655 __register_event(struct ftrace_event_call *call, struct module *mod)
1656 {
1657 int ret;
1658
1659 ret = event_init(call);
1660 if (ret < 0)
1661 return ret;
1662
1663 list_add(&call->list, &ftrace_events);
1664 call->mod = mod;
1665
1666 return 0;
1667 }
1668
1669 static struct ftrace_event_file *
1670 trace_create_new_event(struct ftrace_event_call *call,
1671 struct trace_array *tr)
1672 {
1673 struct ftrace_event_file *file;
1674
1675 file = kmem_cache_alloc(file_cachep, GFP_TRACE);
1676 if (!file)
1677 return NULL;
1678
1679 file->event_call = call;
1680 file->tr = tr;
1681 atomic_set(&file->sm_ref, 0);
1682 atomic_set(&file->tm_ref, 0);
1683 INIT_LIST_HEAD(&file->triggers);
1684 list_add(&file->list, &tr->events);
1685
1686 return file;
1687 }
1688
1689 /* Add an event to a trace directory */
1690 static int
1691 __trace_add_new_event(struct ftrace_event_call *call, struct trace_array *tr)
1692 {
1693 struct ftrace_event_file *file;
1694
1695 file = trace_create_new_event(call, tr);
1696 if (!file)
1697 return -ENOMEM;
1698
1699 return event_create_dir(tr->event_dir, file);
1700 }
1701
1702 /*
1703 * Just create a descriptor for early init. A descriptor is required
1704 * for enabling events at boot. We want to enable events before
1705 * the filesystem is initialized.
1706 */
1707 static __init int
1708 __trace_early_add_new_event(struct ftrace_event_call *call,
1709 struct trace_array *tr)
1710 {
1711 struct ftrace_event_file *file;
1712
1713 file = trace_create_new_event(call, tr);
1714 if (!file)
1715 return -ENOMEM;
1716
1717 return 0;
1718 }
1719
1720 struct ftrace_module_file_ops;
1721 static void __add_event_to_tracers(struct ftrace_event_call *call);
1722
1723 /* Add an additional event_call dynamically */
1724 int trace_add_event_call(struct ftrace_event_call *call)
1725 {
1726 int ret;
1727 mutex_lock(&trace_types_lock);
1728 mutex_lock(&event_mutex);
1729
1730 ret = __register_event(call, NULL);
1731 if (ret >= 0)
1732 __add_event_to_tracers(call);
1733
1734 mutex_unlock(&event_mutex);
1735 mutex_unlock(&trace_types_lock);
1736 return ret;
1737 }
1738
1739 /*
1740 * Must be called under locking of trace_types_lock, event_mutex and
1741 * trace_event_sem.
1742 */
1743 static void __trace_remove_event_call(struct ftrace_event_call *call)
1744 {
1745 event_remove(call);
1746 trace_destroy_fields(call);
1747 destroy_call_preds(call);
1748 }
1749
1750 static int probe_remove_event_call(struct ftrace_event_call *call)
1751 {
1752 struct trace_array *tr;
1753 struct ftrace_event_file *file;
1754
1755 #ifdef CONFIG_PERF_EVENTS
1756 if (call->perf_refcount)
1757 return -EBUSY;
1758 #endif
1759 do_for_each_event_file(tr, file) {
1760 if (file->event_call != call)
1761 continue;
1762 /*
1763 * We can't rely on the ftrace_event_enable_disable(enable => 0)
1764 * we are about to do; FTRACE_EVENT_FL_SOFT_MODE can suppress
1765 * TRACE_REG_UNREGISTER.
1766 */
1767 if (file->flags & FTRACE_EVENT_FL_ENABLED)
1768 return -EBUSY;
1769 /*
1770 * The do_for_each_event_file() is
1771 * a double loop. After finding the call for this
1772 * trace_array, we use break to jump to the next
1773 * trace_array.
1774 */
1775 break;
1776 } while_for_each_event_file();
1777
1778 __trace_remove_event_call(call);
1779
1780 return 0;
1781 }
1782
1783 /* Remove an event_call */
1784 int trace_remove_event_call(struct ftrace_event_call *call)
1785 {
1786 int ret;
1787
1788 mutex_lock(&trace_types_lock);
1789 mutex_lock(&event_mutex);
1790 down_write(&trace_event_sem);
1791 ret = probe_remove_event_call(call);
1792 up_write(&trace_event_sem);
1793 mutex_unlock(&event_mutex);
1794 mutex_unlock(&trace_types_lock);
1795
1796 return ret;
1797 }
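
/*
 * Sketch of the add/remove pairing used by dynamic event sources
 * such as the kprobe tracer; the event_call passed in is assumed to
 * be fully initialized by its (hypothetical) creator.
 */
static __maybe_unused int example_dynamic_event_lifetime(struct ftrace_event_call *call)
{
	int ret;

	ret = trace_add_event_call(call);
	if (ret)
		return ret;

	/* ... the event is now live under events/ ... */

	return trace_remove_event_call(call);	/* -EBUSY if still in use */
}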
1798
1799 #define for_each_event(event, start, end) \
1800 for (event = start; \
1801 (unsigned long)event < (unsigned long)end; \
1802 event++)
1803
1804 #ifdef CONFIG_MODULES
1805
1806 static void trace_module_add_events(struct module *mod)
1807 {
1808 struct ftrace_event_call **call, **start, **end;
1809
1810 start = mod->trace_events;
1811 end = mod->trace_events + mod->num_trace_events;
1812
1813 for_each_event(call, start, end) {
1814 __register_event(*call, mod);
1815 __add_event_to_tracers(*call);
1816 }
1817 }
1818
1819 static void trace_module_remove_events(struct module *mod)
1820 {
1821 struct ftrace_event_call *call, *p;
1822 bool clear_trace = false;
1823
1824 down_write(&trace_event_sem);
1825 list_for_each_entry_safe(call, p, &ftrace_events, list) {
1826 if (call->mod == mod) {
1827 if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
1828 clear_trace = true;
1829 __trace_remove_event_call(call);
1830 }
1831 }
1832 up_write(&trace_event_sem);
1833
1834 /*
1835 * It is safest to reset the ring buffer if the module being unloaded
1836 * registered any events that were used. The only worry is if
1837 * a new module gets loaded, and takes on the same id as the events
1838 * of this module. When printing out the buffer, traced events left
1839 * over from this module may be passed to the new module events and
1840 * unexpected results may occur.
1841 */
1842 if (clear_trace)
1843 tracing_reset_all_online_cpus();
1844 }
1845
1846 static int trace_module_notify(struct notifier_block *self,
1847 unsigned long val, void *data)
1848 {
1849 struct module *mod = data;
1850
1851 mutex_lock(&trace_types_lock);
1852 mutex_lock(&event_mutex);
1853 switch (val) {
1854 case MODULE_STATE_COMING:
1855 trace_module_add_events(mod);
1856 break;
1857 case MODULE_STATE_GOING:
1858 trace_module_remove_events(mod);
1859 break;
1860 }
1861 mutex_unlock(&event_mutex);
1862 mutex_unlock(&trace_types_lock);
1863
1864 return 0;
1865 }
1866
1867 static struct notifier_block trace_module_nb = {
1868 .notifier_call = trace_module_notify,
1869 .priority = 0,
1870 };
1871 #endif /* CONFIG_MODULES */
1872
1873 /* Create a new event directory structure for a trace directory. */
1874 static void
1875 __trace_add_event_dirs(struct trace_array *tr)
1876 {
1877 struct ftrace_event_call *call;
1878 int ret;
1879
1880 list_for_each_entry(call, &ftrace_events, list) {
1881 ret = __trace_add_new_event(call, tr);
1882 if (ret < 0)
1883 pr_warning("Could not create directory for event %s\n",
1884 call->name);
1885 }
1886 }
1887
1888 struct ftrace_event_file *
1889 find_event_file(struct trace_array *tr, const char *system, const char *event)
1890 {
1891 struct ftrace_event_file *file;
1892 struct ftrace_event_call *call;
1893
1894 list_for_each_entry(file, &tr->events, list) {
1895
1896 call = file->event_call;
1897
1898 if (!call->name || !call->class || !call->class->reg)
1899 continue;
1900
1901 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
1902 continue;
1903
1904 if (strcmp(event, call->name) == 0 &&
1905 strcmp(system, call->class->system) == 0)
1906 return file;
1907 }
1908 return NULL;
1909 }
1910
1911 #ifdef CONFIG_DYNAMIC_FTRACE
1912
1913 /* Avoid typos */
1914 #define ENABLE_EVENT_STR "enable_event"
1915 #define DISABLE_EVENT_STR "disable_event"
1916
1917 struct event_probe_data {
1918 struct ftrace_event_file *file;
1919 unsigned long count;
1920 int ref;
1921 bool enable;
1922 };
1923
1924 static void
1925 event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
1926 {
1927 struct event_probe_data **pdata = (struct event_probe_data **)_data;
1928 struct event_probe_data *data = *pdata;
1929
1930 if (!data)
1931 return;
1932
1933 if (data->enable)
1934 clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
1935 else
1936 set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
1937 }
1938
1939 static void
1940 event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
1941 {
1942 struct event_probe_data **pdata = (struct event_probe_data **)_data;
1943 struct event_probe_data *data = *pdata;
1944
1945 if (!data)
1946 return;
1947
1948 if (!data->count)
1949 return;
1950
1951 /* Skip if the event is in a state we want to switch to */
1952 if (data->enable == !(data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
1953 return;
1954
1955 if (data->count != -1)
1956 (data->count)--;
1957
1958 event_enable_probe(ip, parent_ip, _data);
1959 }
1960
1961 static int
1962 event_enable_print(struct seq_file *m, unsigned long ip,
1963 struct ftrace_probe_ops *ops, void *_data)
1964 {
1965 struct event_probe_data *data = _data;
1966
1967 seq_printf(m, "%ps:", (void *)ip);
1968
1969 seq_printf(m, "%s:%s:%s",
1970 data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
1971 data->file->event_call->class->system,
1972 data->file->event_call->name);
1973
1974 if (data->count == -1)
1975 seq_printf(m, ":unlimited\n");
1976 else
1977 seq_printf(m, ":count=%ld\n", data->count);
1978
1979 return 0;
1980 }
1981
1982 static int
1983 event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip,
1984 void **_data)
1985 {
1986 struct event_probe_data **pdata = (struct event_probe_data **)_data;
1987 struct event_probe_data *data = *pdata;
1988
1989 data->ref++;
1990 return 0;
1991 }
1992
1993 static void
1994 event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip,
1995 void **_data)
1996 {
1997 struct event_probe_data **pdata = (struct event_probe_data **)_data;
1998 struct event_probe_data *data = *pdata;
1999
2000 if (WARN_ON_ONCE(data->ref <= 0))
2001 return;
2002
2003 data->ref--;
2004 if (!data->ref) {
2005 /* Remove the SOFT_MODE flag */
2006 __ftrace_event_enable_disable(data->file, 0, 1);
2007 module_put(data->file->event_call->mod);
2008 kfree(data);
2009 }
2010 *pdata = NULL;
2011 }
2012
2013 static struct ftrace_probe_ops event_enable_probe_ops = {
2014 .func = event_enable_probe,
2015 .print = event_enable_print,
2016 .init = event_enable_init,
2017 .free = event_enable_free,
2018 };
2019
2020 static struct ftrace_probe_ops event_enable_count_probe_ops = {
2021 .func = event_enable_count_probe,
2022 .print = event_enable_print,
2023 .init = event_enable_init,
2024 .free = event_enable_free,
2025 };
2026
2027 static struct ftrace_probe_ops event_disable_probe_ops = {
2028 .func = event_enable_probe,
2029 .print = event_enable_print,
2030 .init = event_enable_init,
2031 .free = event_enable_free,
2032 };
2033
2034 static struct ftrace_probe_ops event_disable_count_probe_ops = {
2035 .func = event_enable_count_probe,
2036 .print = event_enable_print,
2037 .init = event_enable_init,
2038 .free = event_enable_free,
2039 };
2040
2041 static int
2042 event_enable_func(struct ftrace_hash *hash,
2043 char *glob, char *cmd, char *param, int enabled)
2044 {
2045 struct trace_array *tr = top_trace_array();
2046 struct ftrace_event_file *file;
2047 struct ftrace_probe_ops *ops;
2048 struct event_probe_data *data;
2049 const char *system;
2050 const char *event;
2051 char *number;
2052 bool enable;
2053 int ret;
2054
2055 /* hash funcs only work with set_ftrace_filter */
2056 if (!enabled || !param)
2057 return -EINVAL;
2058
2059 system = strsep(&param, ":");
2060 if (!param)
2061 return -EINVAL;
2062
2063 event = strsep(&param, ":");
2064
2065 mutex_lock(&event_mutex);
2066
2067 ret = -EINVAL;
2068 file = find_event_file(tr, system, event);
2069 if (!file)
2070 goto out;
2071
2072 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
2073
2074 if (enable)
2075 ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
2076 else
2077 ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
2078
2079 if (glob[0] == '!') {
2080 unregister_ftrace_function_probe_func(glob+1, ops);
2081 ret = 0;
2082 goto out;
2083 }
2084
2085 ret = -ENOMEM;
2086 data = kzalloc(sizeof(*data), GFP_KERNEL);
2087 if (!data)
2088 goto out;
2089
2090 data->enable = enable;
2091 data->count = -1;
2092 data->file = file;
2093
2094 if (!param)
2095 goto out_reg;
2096
2097 number = strsep(&param, ":");
2098
2099 ret = -EINVAL;
2100 if (!strlen(number))
2101 goto out_free;
2102
2103 /*
2104 * We use the callback data field (which is a pointer)
2105 * as our counter.
2106 */
2107 ret = kstrtoul(number, 0, &data->count);
2108 if (ret)
2109 goto out_free;
2110
2111 out_reg:
2112 /* Don't let event modules unload while probe registered */
2113 ret = try_module_get(file->event_call->mod);
2114 if (!ret) {
2115 ret = -EBUSY;
2116 goto out_free;
2117 }
2118
2119 ret = __ftrace_event_enable_disable(file, 1, 1);
2120 if (ret < 0)
2121 goto out_put;
2122 ret = register_ftrace_function_probe(glob, ops, data);
2123 /*
2124 * On success, the above returns the number of functions enabled,
2125 * but if it didn't find any functions it returns zero.
2126 * Consider no functions a failure too.
2127 */
2128 if (!ret) {
2129 ret = -ENOENT;
2130 goto out_disable;
2131 } else if (ret < 0)
2132 goto out_disable;
2133 /* Just return zero, not the number of enabled functions */
2134 ret = 0;
2135 out:
2136 mutex_unlock(&event_mutex);
2137 return ret;
2138
2139 out_disable:
2140 __ftrace_event_enable_disable(file, 0, 1);
2141 out_put:
2142 module_put(file->event_call->mod);
2143 out_free:
2144 kfree(data);
2145 goto out;
2146 }
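
/*
 * Shell usage sketch for the enable_event/disable_event probes
 * registered by the commands below (function and event names are
 * illustrative):
 *
 *	echo 'schedule:enable_event:sched:sched_switch' > set_ftrace_filter
 *	echo 'schedule:enable_event:sched:sched_switch:3' > set_ftrace_filter
 *	echo '!schedule:enable_event:sched:sched_switch' > set_ftrace_filter
 *
 * The optional trailing number is the count parsed above; it limits
 * how many times the probe toggles the event.
 */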
2147
2148 static struct ftrace_func_command event_enable_cmd = {
2149 .name = ENABLE_EVENT_STR,
2150 .func = event_enable_func,
2151 };
2152
2153 static struct ftrace_func_command event_disable_cmd = {
2154 .name = DISABLE_EVENT_STR,
2155 .func = event_enable_func,
2156 };
2157
2158 static __init int register_event_cmds(void)
2159 {
2160 int ret;
2161
2162 ret = register_ftrace_command(&event_enable_cmd);
2163 if (WARN_ON(ret < 0))
2164 return ret;
2165 ret = register_ftrace_command(&event_disable_cmd);
2166 if (WARN_ON(ret < 0))
2167 unregister_ftrace_command(&event_enable_cmd);
2168 return ret;
2169 }
2170 #else
2171 static inline int register_event_cmds(void) { return 0; }
2172 #endif /* CONFIG_DYNAMIC_FTRACE */
2173
2174 /*
2175 * The top level array has already had its ftrace_event_file
2176 * descriptors created in order to allow for early events to
2177 * be recorded. This function is called after debugfs has been
2178 * initialized, and we now have to create the files associated
2179 * with the events.
2180 */
2181 static __init void
2182 __trace_early_add_event_dirs(struct trace_array *tr)
2183 {
2184 struct ftrace_event_file *file;
2185 int ret;
2186
2187
2188 list_for_each_entry(file, &tr->events, list) {
2189 ret = event_create_dir(tr->event_dir, file);
2190 if (ret < 0)
2191 pr_warning("Could not create directory for event %s\n",
2192 file->event_call->name);
2193 }
2194 }
2195
2196 /*
2197 * For early boot up, the top trace array requires a list of
2198 * events that can be enabled. This must be done before
2199 * the filesystem is set up in order to allow events to be traced
2200 * early.
2201 */
2202 static __init void
2203 __trace_early_add_events(struct trace_array *tr)
2204 {
2205 struct ftrace_event_call *call;
2206 int ret;
2207
2208 list_for_each_entry(call, &ftrace_events, list) {
2209 /* Early boot up should not have any modules loaded */
2210 if (WARN_ON_ONCE(call->mod))
2211 continue;
2212
2213 ret = __trace_early_add_new_event(call, tr);
2214 if (ret < 0)
2215 pr_warning("Could not create early event %s\n",
2216 call->name);
2217 }
2218 }
2219
2220 /* Remove the event directory structure for a trace directory. */
2221 static void
2222 __trace_remove_event_dirs(struct trace_array *tr)
2223 {
2224 struct ftrace_event_file *file, *next;
2225
2226 list_for_each_entry_safe(file, next, &tr->events, list)
2227 remove_event_file_dir(file);
2228 }
2229
2230 static void __add_event_to_tracers(struct ftrace_event_call *call)
2231 {
2232 struct trace_array *tr;
2233
2234 list_for_each_entry(tr, &ftrace_trace_arrays, list)
2235 __trace_add_new_event(call, tr);
2236 }
2237
2238 extern struct ftrace_event_call *__start_ftrace_events[];
2239 extern struct ftrace_event_call *__stop_ftrace_events[];
2240
2241 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
2242
2243 static __init int setup_trace_event(char *str)
2244 {
2245 strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
2246 ring_buffer_expanded = true;
2247 tracing_selftest_disabled = true;
2248
2249 return 1;
2250 }
2251 __setup("trace_event=", setup_trace_event);
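
/*
 * Boot-time counterpart of the set_event file: events listed on the
 * kernel command line are stashed in bootup_event_buf here and
 * enabled from event_trace_enable() below. A sketch (the event names
 * depend on the kernel configuration):
 *
 *   trace_event=sched:sched_switch,irq:irq_handler_entry
 */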
2252
2253 /* Expects to have event_mutex held when called */
2254 static int
2255 create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
2256 {
2257 struct dentry *d_events;
2258 struct dentry *entry;
2259
2260 entry = debugfs_create_file("set_event", 0644, parent,
2261 tr, &ftrace_set_event_fops);
2262 if (!entry) {
2263 pr_warning("Could not create debugfs 'set_event' entry\n");
2264 return -ENOMEM;
2265 }
2266
2267 d_events = debugfs_create_dir("events", parent);
2268 if (!d_events) {
2269 pr_warning("Could not create debugfs 'events' directory\n");
2270 return -ENOMEM;
2271 }
2272
2273 /* ring buffer internal formats */
2274 trace_create_file("header_page", 0444, d_events,
2275 ring_buffer_print_page_header,
2276 &ftrace_show_header_fops);
2277
2278 trace_create_file("header_event", 0444, d_events,
2279 ring_buffer_print_entry_header,
2280 &ftrace_show_header_fops);
2281
2282 trace_create_file("enable", 0644, d_events,
2283 tr, &ftrace_tr_enable_fops);
2284
2285 tr->event_dir = d_events;
2286
2287 return 0;
2288 }
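
/*
 * For the top level trace array, the entries created above show up
 * as the following files (assuming the usual debugfs mount point;
 * instances get the same layout under instances/<name>/):
 *
 *   /sys/kernel/debug/tracing/set_event
 *   /sys/kernel/debug/tracing/events/enable
 *   /sys/kernel/debug/tracing/events/header_page
 *   /sys/kernel/debug/tracing/events/header_event
 */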
2289
2290 /**
2291  * event_trace_add_tracer - add an instance of a trace_array to events
2292 * @parent: The parent dentry to place the files/directories for events in
2293 * @tr: The trace array associated with these events
2294 *
2295 * When a new instance is created, it needs to set up its events
2296 * directory, as well as other files associated with events. It also
2297  * creates the event hierarchy in the @parent/events directory.
2298 *
2299 * Returns 0 on success.
2300 */
2301 int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
2302 {
2303 int ret;
2304
2305 mutex_lock(&event_mutex);
2306
2307 ret = create_event_toplevel_files(parent, tr);
2308 if (ret)
2309 goto out_unlock;
2310
2311 down_write(&trace_event_sem);
2312 __trace_add_event_dirs(tr);
2313 up_write(&trace_event_sem);
2314
2315 out_unlock:
2316 mutex_unlock(&event_mutex);
2317
2318 return ret;
2319 }
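
/*
 * This is the path taken when a new tracing instance is created
 * from user space, e.g. (a sketch, "foo" being an arbitrary name):
 *
 *   mkdir /sys/kernel/debug/tracing/instances/foo
 *
 * which gives the instance its own set_event file and events/
 * hierarchy.
 */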
2320
2321 /*
2322  * The top trace array already had its ftrace_event_file
2323  * descriptors created. Now the files themselves need to be created.
2324 */
2325 static __init int
2326 early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
2327 {
2328 int ret;
2329
2330 mutex_lock(&event_mutex);
2331
2332 ret = create_event_toplevel_files(parent, tr);
2333 if (ret)
2334 goto out_unlock;
2335
2336 down_write(&trace_event_sem);
2337 __trace_early_add_event_dirs(tr);
2338 up_write(&trace_event_sem);
2339
2340 out_unlock:
2341 mutex_unlock(&event_mutex);
2342
2343 return ret;
2344 }
2345
2346 int event_trace_del_tracer(struct trace_array *tr)
2347 {
2348 mutex_lock(&event_mutex);
2349
2350 /* Disable any event triggers and associated soft-disabled events */
2351 clear_event_triggers(tr);
2352
2353 /* Disable any running events */
2354 __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
2355
2356 	/* Access to events is within rcu_read_lock_sched() */
2357 synchronize_sched();
2358
2359 down_write(&trace_event_sem);
2360 __trace_remove_event_dirs(tr);
2361 debugfs_remove_recursive(tr->event_dir);
2362 up_write(&trace_event_sem);
2363
2364 tr->event_dir = NULL;
2365
2366 mutex_unlock(&event_mutex);
2367
2368 return 0;
2369 }
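
/*
 * Teardown counterpart of event_trace_add_tracer(), reached when an
 * instance directory is removed:
 *
 *   rmdir /sys/kernel/debug/tracing/instances/foo
 */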
2370
2371 static __init int event_trace_memsetup(void)
2372 {
2373 field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
2374 file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC);
2375 return 0;
2376 }
2377
2378 static __init int event_trace_enable(void)
2379 {
2380 struct trace_array *tr = top_trace_array();
2381 struct ftrace_event_call **iter, *call;
2382 char *buf = bootup_event_buf;
2383 char *token;
2384 int ret;
2385
2386 for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
2387
2388 call = *iter;
2389 ret = event_init(call);
2390 if (!ret)
2391 list_add(&call->list, &ftrace_events);
2392 }
2393
2394 /*
2395 * We need the top trace array to have a working set of trace
2396 * points at early init, before the debug files and directories
2397 * are created. Create the file entries now, and attach them
2398 * to the actual file dentries later.
2399 */
2400 __trace_early_add_events(tr);
2401
2402 while (true) {
2403 token = strsep(&buf, ",");
2404
2405 if (!token)
2406 break;
2407 if (!*token)
2408 continue;
2409
2410 ret = ftrace_set_clr_event(tr, token, 1);
2411 if (ret)
2412 pr_warn("Failed to enable trace event: %s\n", token);
2413 }
2414
2415 trace_printk_start_comm();
2416
2417 register_event_cmds();
2418
2419 register_trigger_cmds();
2420
2421 return 0;
2422 }
2423
2424 static __init int event_trace_init(void)
2425 {
2426 struct trace_array *tr;
2427 struct dentry *d_tracer;
2428 struct dentry *entry;
2429 int ret;
2430
2431 tr = top_trace_array();
2432
2433 d_tracer = tracing_init_dentry();
2434 if (!d_tracer)
2435 return 0;
2436
2437 entry = debugfs_create_file("available_events", 0444, d_tracer,
2438 tr, &ftrace_avail_fops);
2439 if (!entry)
2440 pr_warning("Could not create debugfs "
2441 "'available_events' entry\n");
2442
2443 if (trace_define_common_fields())
2444 		pr_warning("tracing: Failed to allocate common fields\n");
2445
2446 ret = early_event_add_tracer(d_tracer, tr);
2447 if (ret)
2448 return ret;
2449
2450 #ifdef CONFIG_MODULES
2451 ret = register_module_notifier(&trace_module_nb);
2452 if (ret)
2453 pr_warning("Failed to register trace events module notifier\n");
2454 #endif
2455 return 0;
2456 }
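
/*
 * The initcall levels below are deliberate: the slab caches must
 * exist before any event fields or files are allocated
 * (early_initcall), the events themselves are initialized and any
 * boot-requested ones enabled early (core_initcall), and the debugfs
 * entries can only be created once the VFS is up (fs_initcall).
 */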
2457 early_initcall(event_trace_memsetup);
2458 core_initcall(event_trace_enable);
2459 fs_initcall(event_trace_init);
2460
2461 #ifdef CONFIG_FTRACE_STARTUP_TEST
2462
2463 static DEFINE_SPINLOCK(test_spinlock);
2464 static DEFINE_SPINLOCK(test_spinlock_irq);
2465 static DEFINE_MUTEX(test_mutex);
2466
2467 static __init void test_work(struct work_struct *dummy)
2468 {
2469 spin_lock(&test_spinlock);
2470 spin_lock_irq(&test_spinlock_irq);
2471 udelay(1);
2472 spin_unlock_irq(&test_spinlock_irq);
2473 spin_unlock(&test_spinlock);
2474
2475 mutex_lock(&test_mutex);
2476 msleep(1);
2477 mutex_unlock(&test_mutex);
2478 }
2479
2480 static __init int event_test_thread(void *unused)
2481 {
2482 void *test_malloc;
2483
2484 test_malloc = kmalloc(1234, GFP_KERNEL);
2485 if (!test_malloc)
2486 pr_info("failed to kmalloc\n");
2487
2488 schedule_on_each_cpu(test_work);
2489
2490 kfree(test_malloc);
2491
2492 set_current_state(TASK_INTERRUPTIBLE);
2493 while (!kthread_should_stop())
2494 schedule();
2495
2496 return 0;
2497 }
2498
2499 /*
2500 * Do various things that may trigger events.
2501 */
2502 static __init void event_test_stuff(void)
2503 {
2504 struct task_struct *test_thread;
2505
2506 test_thread = kthread_run(event_test_thread, NULL, "test-events");
2507 msleep(1);
2508 kthread_stop(test_thread);
2509 }
2510
2511 /*
2512 * For every trace event defined, we will test each trace point separately,
2513 * and then by groups, and finally all trace points.
2514 */
2515 static __init void event_trace_self_tests(void)
2516 {
2517 struct ftrace_subsystem_dir *dir;
2518 struct ftrace_event_file *file;
2519 struct ftrace_event_call *call;
2520 struct event_subsystem *system;
2521 struct trace_array *tr;
2522 int ret;
2523
2524 tr = top_trace_array();
2525
2526 pr_info("Running tests on trace events:\n");
2527
2528 list_for_each_entry(file, &tr->events, list) {
2529
2530 call = file->event_call;
2531
2532 /* Only test those that have a probe */
2533 if (!call->class || !call->class->probe)
2534 continue;
2535
2536 /*
2537 * Testing syscall events here is pretty useless, but
2538 		 * we still do it if configured. It is time consuming, though:
2539 		 * what we really need is a user thread to perform the
2540 		 * syscalls as we test.
2541 */
2542 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
2543 if (call->class->system &&
2544 strcmp(call->class->system, "syscalls") == 0)
2545 continue;
2546 #endif
2547
2548 pr_info("Testing event %s: ", call->name);
2549
2550 /*
2551 * If an event is already enabled, someone is using
2552 * it and the self test should not be on.
2553 */
2554 if (file->flags & FTRACE_EVENT_FL_ENABLED) {
2555 pr_warning("Enabled event during self test!\n");
2556 WARN_ON_ONCE(1);
2557 continue;
2558 }
2559
2560 ftrace_event_enable_disable(file, 1);
2561 event_test_stuff();
2562 ftrace_event_enable_disable(file, 0);
2563
2564 pr_cont("OK\n");
2565 }
2566
2567 /* Now test at the sub system level */
2568
2569 pr_info("Running tests on trace event systems:\n");
2570
2571 list_for_each_entry(dir, &tr->systems, list) {
2572
2573 system = dir->subsystem;
2574
2575 /* the ftrace system is special, skip it */
2576 if (strcmp(system->name, "ftrace") == 0)
2577 continue;
2578
2579 pr_info("Testing event system %s: ", system->name);
2580
2581 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
2582 if (WARN_ON_ONCE(ret)) {
2583 pr_warning("error enabling system %s\n",
2584 system->name);
2585 continue;
2586 }
2587
2588 event_test_stuff();
2589
2590 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
2591 if (WARN_ON_ONCE(ret)) {
2592 pr_warning("error disabling system %s\n",
2593 system->name);
2594 continue;
2595 }
2596
2597 pr_cont("OK\n");
2598 }
2599
2600 /* Test with all events enabled */
2601
2602 pr_info("Running tests on all trace events:\n");
2603 pr_info("Testing all events: ");
2604
2605 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
2606 if (WARN_ON_ONCE(ret)) {
2607 pr_warning("error enabling all events\n");
2608 return;
2609 }
2610
2611 event_test_stuff();
2612
2613 /* reset sysname */
2614 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
2615 if (WARN_ON_ONCE(ret)) {
2616 pr_warning("error disabling all events\n");
2617 return;
2618 }
2619
2620 pr_cont("OK\n");
2621 }
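
/*
 * With CONFIG_FTRACE_STARTUP_TEST enabled, the above produces boot
 * log output roughly like the following (event and system names
 * vary with the configuration):
 *
 *   Running tests on trace events:
 *   Testing event sched_switch: OK
 *   Running tests on trace event systems:
 *   Testing event system sched: OK
 *   Running tests on all trace events:
 *   Testing all events: OK
 */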
2622
2623 #ifdef CONFIG_FUNCTION_TRACER
2624
2625 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
2626
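/*
 * Hooked into every traceable function while the ftrace_ops below
 * is registered: it hand-crafts a TRACE_FN entry in the current
 * ring buffer. The per-cpu counter above keeps the callback from
 * nesting within itself on the same CPU.
 */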
2627 static void
2628 function_test_events_call(unsigned long ip, unsigned long parent_ip,
2629 struct ftrace_ops *op, struct pt_regs *pt_regs)
2630 {
2631 struct ring_buffer_event *event;
2632 struct ring_buffer *buffer;
2633 struct ftrace_entry *entry;
2634 unsigned long flags;
2635 long disabled;
2636 int cpu;
2637 int pc;
2638
2639 pc = preempt_count();
2640 preempt_disable_notrace();
2641 cpu = raw_smp_processor_id();
2642 disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
2643
2644 if (disabled != 1)
2645 goto out;
2646
2647 local_save_flags(flags);
2648
2649 event = trace_current_buffer_lock_reserve(&buffer,
2650 TRACE_FN, sizeof(*entry),
2651 flags, pc);
2652 if (!event)
2653 goto out;
2654 entry = ring_buffer_event_data(event);
2655 entry->ip = ip;
2656 entry->parent_ip = parent_ip;
2657
2658 trace_buffer_unlock_commit(buffer, event, flags, pc);
2659
2660 out:
2661 atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
2662 preempt_enable_notrace();
2663 }
2664
2665 static struct ftrace_ops trace_ops __initdata =
2666 {
2667 .func = function_test_events_call,
2668 .flags = FTRACE_OPS_FL_RECURSION_SAFE,
2669 };
2670
2671 static __init void event_trace_self_test_with_function(void)
2672 {
2673 int ret;
2674 ret = register_ftrace_function(&trace_ops);
2675 if (WARN_ON(ret < 0)) {
2676 pr_info("Failed to enable function tracer for event tests\n");
2677 return;
2678 }
2679 pr_info("Running tests again, along with the function tracer\n");
2680 event_trace_self_tests();
2681 unregister_ftrace_function(&trace_ops);
2682 }
2683 #else
2684 static __init void event_trace_self_test_with_function(void)
2685 {
2686 }
2687 #endif
2688
2689 static __init int event_trace_self_tests_init(void)
2690 {
2691 if (!tracing_selftest_disabled) {
2692 event_trace_self_tests();
2693 event_trace_self_test_with_function();
2694 }
2695
2696 return 0;
2697 }
2698
2699 late_initcall(event_trace_self_tests_init);
2700
2701 #endif