kernel/*: switch to memdup_user_nul()
kernel/trace/trace_events_trigger.c
/*
 * trace_events_trigger - trace event triggers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
 */

#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include "trace.h"

static LIST_HEAD(trigger_commands);
static DEFINE_MUTEX(trigger_cmd_mutex);

static void
trigger_data_free(struct event_trigger_data *data)
{
	if (data->cmd_ops->set_filter)
		data->cmd_ops->set_filter(NULL, data, NULL);

	synchronize_sched(); /* make sure current triggers exit before free */
	kfree(data);
}

/**
 * event_triggers_call - Call triggers associated with a trace event
 * @file: The trace_event_file associated with the event
 * @rec: The trace entry for the event, NULL for unconditional invocation
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command.  If rec is
 * non-NULL, it means that the trigger requires further processing and
 * shouldn't be unconditionally invoked.  If rec is non-NULL and the
 * trigger has a filter associated with it, rec will be checked against
 * the filter and the trigger will only be invoked if the record
 * matches.  If the trigger is a 'post_trigger', meaning it shouldn't
 * be invoked until after the current event is written, the trigger
 * function isn't invoked here; instead, the bit associated with the
 * deferred trigger is set in the return value.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 *
 * Return: an enum event_trigger_type value containing a set bit for
 * any trigger that should be deferred, ETT_NONE if nothing to defer.
 */
enum event_trigger_type
event_triggers_call(struct trace_event_file *file, void *rec)
{
	struct event_trigger_data *data;
	enum event_trigger_type tt = ETT_NONE;
	struct event_filter *filter;

	if (list_empty(&file->triggers))
		return tt;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (!rec) {
			data->ops->func(data);
			continue;
		}
		filter = rcu_dereference_sched(data->filter);
		if (filter && !filter_match_preds(filter, rec))
			continue;
		if (data->cmd_ops->post_trigger) {
			tt |= data->cmd_ops->trigger_type;
			continue;
		}
		data->ops->func(data);
	}
	return tt;
}
EXPORT_SYMBOL_GPL(event_triggers_call);

/**
 * event_triggers_post_call - Call 'post_triggers' for a trace event
 * @file: The trace_event_file associated with the event
 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command, if the
 * corresponding bit is set in the tt enum passed into this function.
 * See @event_triggers_call for details on how those bits are set.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 */
void
event_triggers_post_call(struct trace_event_file *file,
			 enum event_trigger_type tt)
{
	struct event_trigger_data *data;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->cmd_ops->trigger_type & tt)
			data->ops->func(data);
	}
}
EXPORT_SYMBOL_GPL(event_triggers_post_call);

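/*
 * Illustrative caller pattern (a sketch, not code from this file): the
 * trace event submission path is expected to pair the two calls above
 * roughly as follows, passing the record to event_triggers_call() and
 * replaying any deferred bits via event_triggers_post_call() once the
 * event has been committed.  The surrounding helper names are
 * assumptions for illustration only.
 *
 *	enum event_trigger_type tt = ETT_NONE;
 *
 *	if (file->flags & EVENT_FILE_FL_TRIGGER_COND)
 *		tt = event_triggers_call(file, entry);
 *	...				(commit the event to the buffer)
 *	if (tt)
 *		event_triggers_post_call(file, tt);
 */
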
#define SHOW_AVAILABLE_TRIGGERS	(void *)(1UL)

static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
{
	struct trace_event_file *event_file = event_file_data(m->private);

	if (t == SHOW_AVAILABLE_TRIGGERS)
		return NULL;

	return seq_list_next(t, &event_file->triggers, pos);
}

static void *trigger_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *event_file;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	event_file = event_file_data(m->private);
	if (unlikely(!event_file))
		return ERR_PTR(-ENODEV);

	if (list_empty(&event_file->triggers))
		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;

	return seq_list_start(&event_file->triggers, *pos);
}

static void trigger_stop(struct seq_file *m, void *t)
{
	mutex_unlock(&event_mutex);
}

static int trigger_show(struct seq_file *m, void *v)
{
	struct event_trigger_data *data;
	struct event_command *p;

	if (v == SHOW_AVAILABLE_TRIGGERS) {
		seq_puts(m, "# Available triggers:\n");
		seq_putc(m, '#');
		mutex_lock(&trigger_cmd_mutex);
		list_for_each_entry_reverse(p, &trigger_commands, list)
			seq_printf(m, " %s", p->name);
		seq_putc(m, '\n');
		mutex_unlock(&trigger_cmd_mutex);
		return 0;
	}

	data = list_entry(v, struct event_trigger_data, list);
	data->ops->print(m, data->ops, data);

	return 0;
}

static const struct seq_operations event_triggers_seq_ops = {
	.start = trigger_start,
	.next = trigger_next,
	.stop = trigger_stop,
	.show = trigger_show,
};

static int event_trigger_regex_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	mutex_lock(&event_mutex);

	if (unlikely(!event_file_data(file))) {
		mutex_unlock(&event_mutex);
		return -ENODEV;
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &event_triggers_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = file;
		}
	}

	mutex_unlock(&event_mutex);

	return ret;
}

static int trigger_process_regex(struct trace_event_file *file, char *buff)
{
	char *command, *next = buff;
	struct event_command *p;
	int ret = -EINVAL;

	command = strsep(&next, ": \t");
	command = (command[0] != '!') ? command : command + 1;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry(p, &trigger_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(p, file, buff, command, next);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

static ssize_t event_trigger_regex_write(struct file *file,
					 const char __user *ubuf,
					 size_t cnt, loff_t *ppos)
{
	struct trace_event_file *event_file;
	ssize_t ret;
	char *buf;

	if (!cnt)
		return 0;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	strim(buf);

	mutex_lock(&event_mutex);
	event_file = event_file_data(file);
	if (unlikely(!event_file)) {
		mutex_unlock(&event_mutex);
		kfree(buf);
		return -ENODEV;
	}
	ret = trigger_process_regex(event_file, buf);
	mutex_unlock(&event_mutex);

	kfree(buf);
	if (ret < 0)
		goto out;

	*ppos += cnt;
	ret = cnt;
 out:
	return ret;
}

static int event_trigger_regex_release(struct inode *inode, struct file *file)
{
	mutex_lock(&event_mutex);

	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	mutex_unlock(&event_mutex);

	return 0;
}

static ssize_t
event_trigger_write(struct file *filp, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
}

static int
event_trigger_open(struct inode *inode, struct file *filp)
{
	return event_trigger_regex_open(inode, filp);
}

static int
event_trigger_release(struct inode *inode, struct file *file)
{
	return event_trigger_regex_release(inode, file);
}

const struct file_operations event_trigger_fops = {
	.open = event_trigger_open,
	.read = seq_read,
	.write = event_trigger_write,
	.llseek = tracing_lseek,
	.release = event_trigger_release,
};

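/*
 * Illustrative usage of the per-event 'trigger' file exposed through
 * the fops above (a sketch; the tracefs mount point and the event name
 * are assumptions and will vary by system):
 *
 *	# attach a trigger, optionally with a count and a filter
 *	echo 'traceoff:5 if prev_prio < 100' > \
 *		/sys/kernel/debug/tracing/events/sched/sched_switch/trigger
 *
 *	# list active triggers (or the available commands if none are set)
 *	cat /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
 *
 *	# remove it again by prefixing the same command with '!'
 *	echo '!traceoff' > \
 *		/sys/kernel/debug/tracing/events/sched/sched_switch/trigger
 */
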
/*
 * Currently we only register event commands from __init, so mark this
 * __init too.
 */
static __init int register_event_command(struct event_command *cmd)
{
	struct event_command *p;
	int ret = 0;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry(p, &trigger_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}
	list_add(&cmd->list, &trigger_commands);
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

/*
 * Currently we only unregister event commands from __init, so mark
 * this __init too.
 */
static __init int unregister_event_command(struct event_command *cmd)
{
	struct event_command *p, *n;
	int ret = -ENODEV;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry_safe(p, n, &trigger_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = 0;
			list_del_init(&p->list);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

/**
 * event_trigger_print - Generic event_trigger_ops @print implementation
 * @name: The name of the event trigger
 * @m: The seq_file being printed to
 * @data: Trigger-specific data
 * @filter_str: filter_str to print, if present
 *
 * Common implementation for event triggers to print themselves.
 *
 * Usually wrapped by a function that simply sets the @name of the
 * trigger command and then invokes this.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_print(const char *name, struct seq_file *m,
		    void *data, char *filter_str)
{
	long count = (long)data;

	seq_puts(m, name);

	if (count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", count);

	if (filter_str)
		seq_printf(m, " if %s\n", filter_str);
	else
		seq_putc(m, '\n');

	return 0;
}

/**
 * event_trigger_init - Generic event_trigger_ops @init implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger initialization.
 *
 * Usually used directly as the @init method in event trigger
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_init(struct event_trigger_ops *ops,
		   struct event_trigger_data *data)
{
	data->ref++;
	return 0;
}

/**
 * event_trigger_free - Generic event_trigger_ops @free implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger de-initialization.
 *
 * Usually used directly as the @free method in event trigger
 * implementations.
 */
static void
event_trigger_free(struct event_trigger_ops *ops,
		   struct event_trigger_data *data)
{
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref)
		trigger_data_free(data);
}

static int trace_event_trigger_enable_disable(struct trace_event_file *file,
					      int trigger_enable)
{
	int ret = 0;

	if (trigger_enable) {
		if (atomic_inc_return(&file->tm_ref) > 1)
			return ret;
		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 1, 1);
	} else {
		if (atomic_dec_return(&file->tm_ref) > 0)
			return ret;
		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 0, 1);
	}

	return ret;
}

/**
 * clear_event_triggers - Clear all triggers associated with a trace array
 * @tr: The trace array to clear
 *
 * For each trigger, the triggering event has its tm_ref decremented
 * via trace_event_trigger_enable_disable(), and any associated event
 * (in the case of enable/disable_event triggers) will have its sm_ref
 * decremented via free()->trace_event_enable_disable().  That
 * combination effectively reverses the soft-mode/trigger state added
 * by trigger registration.
 *
 * Must be called with event_mutex held.
 */
void
clear_event_triggers(struct trace_array *tr)
{
	struct trace_event_file *file;

	list_for_each_entry(file, &tr->events, list) {
		struct event_trigger_data *data;
		list_for_each_entry_rcu(data, &file->triggers, list) {
			trace_event_trigger_enable_disable(file, 0);
			if (data->ops->free)
				data->ops->free(data->ops, data);
		}
	}
}

/**
 * update_cond_flag - Set or reset the TRIGGER_COND bit
 * @file: The trace_event_file associated with the event
 *
 * If an event has triggers and any of those triggers has a filter or
 * a post_trigger, trigger invocation needs to be deferred until after
 * the current event has logged its data, and the event should have
 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
 * cleared.
 */
static void update_cond_flag(struct trace_event_file *file)
{
	struct event_trigger_data *data;
	bool set_cond = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->filter || data->cmd_ops->post_trigger) {
			set_cond = true;
			break;
		}
	}

	if (set_cond)
		set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
	else
		clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
}

/**
 * register_trigger - Generic event_command @reg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data to associate with the trigger
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger registration.
 *
 * Usually used directly as the @reg method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int register_trigger(char *glob, struct event_trigger_ops *ops,
			    struct event_trigger_data *data,
			    struct trace_event_file *file)
{
	struct event_trigger_data *test;
	int ret = 0;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		ret--;
	}
	update_cond_flag(file);
 out:
	return ret;
}

/**
 * unregister_trigger - Generic event_command @unreg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @test: Trigger-specific data used to find the trigger to remove
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger unregistration.
 *
 * Usually used directly as the @unreg method in event command
 * implementations.
 */
static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
			       struct event_trigger_data *test,
			       struct trace_event_file *file)
{
	struct event_trigger_data *data;
	bool unregistered = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
			unregistered = true;
			list_del_rcu(&data->list);
			update_cond_flag(file);
			trace_event_trigger_enable_disable(file, 0);
			break;
		}
	}

	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}

/**
 * event_trigger_callback - Generic event_command @func implementation
 * @cmd_ops: The command ops, used for trigger registration
 * @file: The trace_event_file associated with the event
 * @glob: The raw string used to register the trigger
 * @cmd: The cmd portion of the string used to register the trigger
 * @param: The params portion of the string used to register the trigger
 *
 * Common implementation for event command parsing and trigger
 * instantiation.
 *
 * Usually used directly as the @func method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_callback(struct event_command *cmd_ops,
		       struct trace_event_file *file,
		       char *glob, char *cmd, char *param)
{
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	char *trigger = NULL;
	char *number;
	int ret;

	/* separate the trigger from the filter (t:n [if filter]) */
	if (param && isdigit(param[0]))
		trigger = strsep(&param, " \t");

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	INIT_LIST_HEAD(&trigger_data->list);

	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		ret = 0;
		goto out;
	}

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_free;
	} else if (ret < 0)
		goto out_free;
	ret = 0;
 out:
	return ret;

 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	kfree(trigger_data);
	goto out;
}

/**
 * set_trigger_filter - Generic event_command @set_filter implementation
 * @filter_str: The filter string for the trigger, NULL to remove filter
 * @trigger_data: Trigger-specific data
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event command filter parsing and filter
 * instantiation.
 *
 * Usually used directly as the @set_filter method in event command
 * implementations.
 *
 * Also used to remove a filter (if filter_str = NULL).
 *
 * Return: 0 on success, errno otherwise
 */
static int set_trigger_filter(char *filter_str,
			      struct event_trigger_data *trigger_data,
			      struct trace_event_file *file)
{
	struct event_trigger_data *data = trigger_data;
	struct event_filter *filter = NULL, *tmp;
	int ret = -EINVAL;
	char *s;

	if (!filter_str) /* clear the current filter */
		goto assign;

	s = strsep(&filter_str, " \t");

	if (!strlen(s) || strcmp(s, "if") != 0)
		goto out;

	if (!filter_str)
		goto out;

	/* The filter is for the 'trigger' event, not the triggered event */
	ret = create_event_filter(file->event_call, filter_str, false, &filter);
	if (ret)
		goto out;
 assign:
	tmp = rcu_access_pointer(data->filter);

	rcu_assign_pointer(data->filter, filter);

	if (tmp) {
		/* Make sure the call is done with the filter */
		synchronize_sched();
		free_event_filter(tmp);
	}

	kfree(data->filter_str);
	data->filter_str = NULL;

	if (filter_str) {
		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
		if (!data->filter_str) {
			free_event_filter(rcu_access_pointer(data->filter));
			data->filter = NULL;
			ret = -ENOMEM;
		}
	}
 out:
	return ret;
}

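/*
 * Illustrative trigger strings as parsed by event_trigger_callback()
 * and set_trigger_filter() above (a sketch; the filter field used is
 * an assumption for illustration):
 *
 *	traceoff			unconditional, unlimited
 *	traceoff:3			fire at most 3 times
 *	traceoff:3 if prio < 100	same, but only when the filter
 *					matches the triggering record
 *
 * The 'if <expr>' part uses the same predicate syntax as the event's
 * regular 'filter' file.
 */
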
static void
traceon_trigger(struct event_trigger_data *data)
{
	if (tracing_is_on())
		return;

	tracing_on();
}

static void
traceon_count_trigger(struct event_trigger_data *data)
{
	if (tracing_is_on())
		return;

	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	tracing_on();
}

static void
traceoff_trigger(struct event_trigger_data *data)
{
	if (!tracing_is_on())
		return;

	tracing_off();
}

static void
traceoff_count_trigger(struct event_trigger_data *data)
{
	if (!tracing_is_on())
		return;

	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	tracing_off();
}

static int
traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		      struct event_trigger_data *data)
{
	return event_trigger_print("traceon", m, (void *)data->count,
				   data->filter_str);
}

static int
traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("traceoff", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops traceon_trigger_ops = {
	.func = traceon_trigger,
	.print = traceon_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceon_count_trigger_ops = {
	.func = traceon_count_trigger,
	.print = traceon_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceoff_trigger_ops = {
	.func = traceoff_trigger,
	.print = traceoff_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceoff_count_trigger_ops = {
	.func = traceoff_count_trigger,
	.print = traceoff_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops *
onoff_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_trigger_ops :
			&traceon_trigger_ops;
	else
		ops = param ? &traceoff_count_trigger_ops :
			&traceoff_trigger_ops;

	return ops;
}

static struct event_command trigger_traceon_cmd = {
	.name = "traceon",
	.trigger_type = ETT_TRACE_ONOFF,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = onoff_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static struct event_command trigger_traceoff_cmd = {
	.name = "traceoff",
	.trigger_type = ETT_TRACE_ONOFF,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = onoff_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static void
snapshot_trigger(struct event_trigger_data *data)
{
	tracing_snapshot();
}

static void
snapshot_count_trigger(struct event_trigger_data *data)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	snapshot_trigger(data);
}

static int
register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
			  struct event_trigger_data *data,
			  struct trace_event_file *file)
{
	int ret = register_trigger(glob, ops, data, file);

	if (ret > 0 && tracing_alloc_snapshot() != 0) {
		unregister_trigger(glob, ops, data, file);
		ret = 0;
	}

	return ret;
}

static int
snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("snapshot", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops snapshot_trigger_ops = {
	.func = snapshot_trigger,
	.print = snapshot_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops snapshot_count_trigger_ops = {
	.func = snapshot_count_trigger,
	.print = snapshot_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops *
snapshot_get_trigger_ops(char *cmd, char *param)
{
	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
}

static struct event_command trigger_snapshot_cmd = {
	.name = "snapshot",
	.trigger_type = ETT_SNAPSHOT,
	.func = event_trigger_callback,
	.reg = register_snapshot_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = snapshot_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static __init int register_trigger_snapshot_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_snapshot_cmd);
	WARN_ON(ret < 0);

	return ret;
}
#else
static __init int register_trigger_snapshot_cmd(void) { return 0; }
#endif /* CONFIG_TRACER_SNAPSHOT */

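/*
 * Illustrative use of the snapshot trigger (a sketch; the paths and the
 * chosen event are assumptions): register_snapshot_trigger() above
 * allocates the snapshot buffer up front so the trigger itself can run
 * safely from a tracepoint.
 *
 *	echo 'snapshot:1 if bytes_req > 4096' > \
 *		/sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
 *	cat /sys/kernel/debug/tracing/snapshot
 */
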
#ifdef CONFIG_STACKTRACE
/*
 * Skip 3:
 *   stacktrace_trigger()
 *   event_triggers_post_call()
 *   trace_event_raw_event_xxx()
 */
#define STACK_SKIP 3

static void
stacktrace_trigger(struct event_trigger_data *data)
{
	trace_dump_stack(STACK_SKIP);
}

static void
stacktrace_count_trigger(struct event_trigger_data *data)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	stacktrace_trigger(data);
}

static int
stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
			 struct event_trigger_data *data)
{
	return event_trigger_print("stacktrace", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops stacktrace_trigger_ops = {
	.func = stacktrace_trigger,
	.print = stacktrace_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops stacktrace_count_trigger_ops = {
	.func = stacktrace_count_trigger,
	.print = stacktrace_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops *
stacktrace_get_trigger_ops(char *cmd, char *param)
{
	return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
}

static struct event_command trigger_stacktrace_cmd = {
	.name = "stacktrace",
	.trigger_type = ETT_STACKTRACE,
	.post_trigger = true,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = stacktrace_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static __init int register_trigger_stacktrace_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_stacktrace_cmd);
	WARN_ON(ret < 0);

	return ret;
}
#else
static __init int register_trigger_stacktrace_cmd(void) { return 0; }
#endif /* CONFIG_STACKTRACE */

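/*
 * Illustrative use of the stacktrace trigger (a sketch; the event and
 * filter field are assumptions).  Because trigger_stacktrace_cmd sets
 * .post_trigger, the dump is deferred via event_triggers_post_call()
 * until after the triggering event has been written.
 *
 *	echo 'stacktrace:5 if nr_sector > 8' > \
 *		/sys/kernel/debug/tracing/events/block/block_rq_insert/trigger
 */
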
static __init void unregister_trigger_traceon_traceoff_cmds(void)
{
	unregister_event_command(&trigger_traceon_cmd);
	unregister_event_command(&trigger_traceoff_cmd);
}

/* Avoid typos */
#define ENABLE_EVENT_STR	"enable_event"
#define DISABLE_EVENT_STR	"disable_event"

struct enable_trigger_data {
	struct trace_event_file *file;
	bool enable;
};

static void
event_enable_trigger(struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (enable_data->enable)
		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
	else
		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
}

static void
event_enable_count_trigger(struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (!data->count)
		return;

	/* Skip if the event is in a state we want to switch to */
	if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
		return;

	if (data->count != -1)
		(data->count)--;

	event_enable_trigger(data);
}

static int
event_enable_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
			   struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	seq_printf(m, "%s:%s:%s",
		   enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
		   enable_data->file->event_call->class->system,
		   trace_event_name(enable_data->file->event_call));

	if (data->count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", data->count);

	if (data->filter_str)
		seq_printf(m, " if %s\n", data->filter_str);
	else
		seq_putc(m, '\n');

	return 0;
}

static void
event_enable_trigger_free(struct event_trigger_ops *ops,
			  struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		/* Remove the SOFT_MODE flag */
		trace_event_enable_disable(enable_data->file, 0, 1);
		module_put(enable_data->file->event_call->mod);
		trigger_data_free(data);
		kfree(enable_data);
	}
}

static struct event_trigger_ops event_enable_trigger_ops = {
	.func = event_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_enable_count_trigger_ops = {
	.func = event_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_trigger_ops = {
	.func = event_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_count_trigger_ops = {
	.func = event_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static int
event_enable_trigger_func(struct event_command *cmd_ops,
			  struct trace_event_file *file,
			  char *glob, char *cmd, char *param)
{
	struct trace_event_file *event_enable_file;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	struct trace_array *tr = file->tr;
	const char *system;
	const char *event;
	char *trigger;
	char *number;
	bool enable;
	int ret;

	if (!param)
		return -EINVAL;

	/* separate the trigger from the filter (s:e:n [if filter]) */
	trigger = strsep(&param, " \t");
	if (!trigger)
		return -EINVAL;

	system = strsep(&trigger, ":");
	if (!trigger)
		return -EINVAL;

	event = strsep(&trigger, ":");

	ret = -EINVAL;
	event_enable_file = find_event_file(tr, system, event);
	if (!event_enable_file)
		goto out;

	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
	if (!enable_data) {
		kfree(trigger_data);
		goto out;
	}

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	INIT_LIST_HEAD(&trigger_data->list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	enable_data->enable = enable;
	enable_data->file = event_enable_file;
	trigger_data->private_data = enable_data;

	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		kfree(enable_data);
		ret = 0;
		goto out;
	}

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	/* Don't let event modules unload while probe registered */
	ret = try_module_get(event_enable_file->event_call->mod);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
	}

	ret = trace_event_enable_disable(event_enable_file, 1, 1);
	if (ret < 0)
		goto out_put;
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_disable;
	} else if (ret < 0)
		goto out_disable;
	/* Just return zero, not the number of enabled functions */
	ret = 0;
 out:
	return ret;

 out_disable:
	trace_event_enable_disable(event_enable_file, 0, 1);
 out_put:
	module_put(event_enable_file->event_call->mod);
 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	kfree(trigger_data);
	kfree(enable_data);
	goto out;
}

static int event_enable_register_trigger(char *glob,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data,
					 struct trace_event_file *file)
{
	struct enable_trigger_data *enable_data = data->private_data;
	struct enable_trigger_data *test_enable_data;
	struct event_trigger_data *test;
	int ret = 0;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		test_enable_data = test->private_data;
		if (test_enable_data &&
		    (test_enable_data->file == enable_data->file)) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		ret--;
	}
	update_cond_flag(file);
 out:
	return ret;
}

static void event_enable_unregister_trigger(char *glob,
					    struct event_trigger_ops *ops,
					    struct event_trigger_data *test,
					    struct trace_event_file *file)
{
	struct enable_trigger_data *test_enable_data = test->private_data;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *data;
	bool unregistered = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		enable_data = data->private_data;
		if (enable_data &&
		    (enable_data->file == test_enable_data->file)) {
			unregistered = true;
			list_del_rcu(&data->list);
			update_cond_flag(file);
			trace_event_trigger_enable_disable(file, 0);
			break;
		}
	}

	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}

static struct event_trigger_ops *
event_enable_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;
	bool enable;

	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;

	if (enable)
		ops = param ? &event_enable_count_trigger_ops :
			&event_enable_trigger_ops;
	else
		ops = param ? &event_disable_count_trigger_ops :
			&event_disable_trigger_ops;

	return ops;
}

static struct event_command trigger_enable_cmd = {
	.name = ENABLE_EVENT_STR,
	.trigger_type = ETT_EVENT_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.get_trigger_ops = event_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static struct event_command trigger_disable_cmd = {
	.name = DISABLE_EVENT_STR,
	.trigger_type = ETT_EVENT_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.get_trigger_ops = event_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

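/*
 * Illustrative use of the enable_event/disable_event triggers parsed by
 * event_enable_trigger_func() above (a sketch; the events chosen are
 * assumptions).  The parameter format is <system>:<event>[:count]
 * [if filter], naming the event to soft-enable or soft-disable when the
 * event that owns the trigger file fires.
 *
 *	echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \
 *		/sys/kernel/debug/tracing/events/block/block_unplug/trigger
 *	echo '!enable_event:kmem:kmalloc:3' > \
 *		/sys/kernel/debug/tracing/events/block/block_unplug/trigger
 */
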
static __init void unregister_trigger_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_enable_cmd);
	unregister_event_command(&trigger_disable_cmd);
}

static __init int register_trigger_enable_disable_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_enable_disable_cmds();

	return ret;
}

static __init int register_trigger_traceon_traceoff_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_traceon_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_traceoff_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_traceon_traceoff_cmds();

	return ret;
}

__init int register_trigger_cmds(void)
{
	register_trigger_traceon_traceoff_cmds();
	register_trigger_snapshot_cmd();
	register_trigger_stacktrace_cmd();
	register_trigger_enable_disable_cmds();

	return 0;
}