spi: Use dev_get_drvdata at appropriate places
[deliverable/linux.git] / kernel / trace / trace_events_filter.c
1 /*
2 * trace_events_filter - generic event filtering
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
19 */
20
21 #include <linux/module.h>
22 #include <linux/ctype.h>
23 #include <linux/mutex.h>
24 #include <linux/perf_event.h>
25 #include <linux/slab.h>
26
27 #include "trace.h"
28 #include "trace_output.h"
29
/*
 * Text shown when reading a subsystem's "filter" file while no
 * subsystem-wide filter is set.
 */
#define DEFAULT_SYS_FILTER_MESSAGE					\
	"### global filter ###\n"					\
	"# Use this to set filters for multiple events.\n"		\
	"# Only events with the given fields will be affected.\n"	\
	"# If no events are modified, an error message will be displayed here"
/*
 * Operator identifiers used by the filter parser.  These index into
 * filter_ops[] below, so the two must stay in the same order.
 */
enum filter_op_ids
{
	OP_OR,
	OP_AND,
	OP_GLOB,
	OP_NE,
	OP_EQ,
	OP_LT,
	OP_LE,
	OP_GT,
	OP_GE,
	OP_BAND,
	OP_NONE,	/* sentinel: not a real operator */
	OP_OPEN_PAREN,	/* only ever lives on the parse-time op stack */
};

/* One table entry per operator: id, textual form, and parse precedence. */
struct filter_op {
	int id;
	char *string;
	int precedence;
};

/* Order must be the same as enum filter_op_ids above */
static struct filter_op filter_ops[] = {
	{ OP_OR,	"||",		1 },
	{ OP_AND,	"&&",		2 },
	{ OP_GLOB,	"~",		4 },
	{ OP_NE,	"!=",		4 },
	{ OP_EQ,	"==",		4 },
	{ OP_LT,	"<",		5 },
	{ OP_LE,	"<=",		5 },
	{ OP_GT,	">",		5 },
	{ OP_GE,	">=",		5 },
	{ OP_BAND,	"&",		6 },
	{ OP_NONE,	"OP_NONE",	0 },	/* also the table terminator */
	{ OP_OPEN_PAREN, "(",		0 },
};
73
/*
 * Parse/setup error codes.  Each value indexes the matching message in
 * err_text[] below, so the two lists must stay in sync.
 */
enum {
	FILT_ERR_NONE,
	FILT_ERR_INVALID_OP,
	FILT_ERR_UNBALANCED_PAREN,
	FILT_ERR_TOO_MANY_OPERANDS,
	FILT_ERR_OPERAND_TOO_LONG,
	FILT_ERR_FIELD_NOT_FOUND,
	FILT_ERR_ILLEGAL_FIELD_OP,
	FILT_ERR_ILLEGAL_INTVAL,
	FILT_ERR_BAD_SUBSYS_FILTER,
	FILT_ERR_TOO_MANY_PREDS,
	FILT_ERR_MISSING_FIELD,
	FILT_ERR_INVALID_FILTER,
	FILT_ERR_IP_FIELD_ONLY,
};

/* Human-readable messages, indexed by the FILT_ERR_* values above. */
static char *err_text[] = {
	"No error",
	"Invalid operator",
	"Unbalanced parens",
	"Too many operands",
	"Operand too long",
	"Field not found",
	"Illegal operation for field type",
	"Illegal integer value",
	"Couldn't find or set field in one of a subsystem's events",
	"Too many terms in predicate expression",
	"Missing field name and/or value",
	"Meaningless filter expression",
	"Only 'ip' field is supported for function trace",
};
105
/* Operator pushed on the parse-time operator stack (LIFO list). */
struct opstack_op {
	int op;				/* one of enum filter_op_ids */
	struct list_head list;
};

/* One element of the postfix (RPN) output: either an op or an operand. */
struct postfix_elt {
	int op;				/* OP_NONE when this is an operand */
	char *operand;			/* kstrdup'd text, NULL for ops */
	struct list_head list;
};

/* Scratch state for converting an infix filter string to postfix form. */
struct filter_parse_state {
	struct filter_op *ops;		/* operator table (filter_ops) */
	struct list_head opstack;	/* shunting-yard operator stack */
	struct list_head postfix;	/* resulting postfix expression */
	int lasterr;			/* last FILT_ERR_* recorded */
	int lasterr_pos;		/* position of that error in the input */

	struct {
		char *string;		/* the raw infix input */
		unsigned int cnt;	/* characters remaining */
		unsigned int tail;	/* next character to consume */
	} infix;

	struct {
		char string[MAX_FILTER_STR_VAL]; /* operand being accumulated */
		int pos;
		unsigned int tail;	/* write index into string[] */
	} operand;
};

/* LIFO of predicate pointers used while linking preds into a tree. */
struct pred_stack {
	struct filter_pred	**preds;
	int			index;	/* next free slot, counts down */
};
141
/*
 * DEFINE_COMPARISON_PRED - generate filter_pred_<type>().
 *
 * The generated predicate loads a <type> value from the event record at
 * pred->offset and compares it to pred->val with the ordered operator in
 * pred->op (<, <=, >, >=, &).  Unrecognized ops report no match.
 */
#define DEFINE_COMPARISON_PRED(type)					\
static int filter_pred_##type(struct filter_pred *pred, void *event)	\
{									\
	type *addr = (type *)(event + pred->offset);			\
	type val = (type)pred->val;					\
	int match = 0;							\
									\
	switch (pred->op) {						\
	case OP_LT:							\
		match = (*addr < val);					\
		break;							\
	case OP_LE:							\
		match = (*addr <= val);					\
		break;							\
	case OP_GT:							\
		match = (*addr > val);					\
		break;							\
	case OP_GE:							\
		match = (*addr >= val);					\
		break;							\
	case OP_BAND:							\
		match = (*addr & val);					\
		break;							\
	default:							\
		break;							\
	}								\
									\
	return match;							\
}

/*
 * DEFINE_EQUALITY_PRED - generate filter_pred_<size>().
 *
 * The generated predicate tests (in)equality of a <size>-bit unsigned
 * value at pred->offset against pred->val; pred->not inverts the result
 * (used for "!=").
 */
#define DEFINE_EQUALITY_PRED(size)					\
static int filter_pred_##size(struct filter_pred *pred, void *event)	\
{									\
	u##size *addr = (u##size *)(event + pred->offset);		\
	u##size val = (u##size)pred->val;				\
	int match;							\
									\
	match = (val == *addr) ^ pred->not;				\
									\
	return match;							\
}

/* Instantiate ordered-comparison predicates for all integer field types. */
DEFINE_COMPARISON_PRED(s64);
DEFINE_COMPARISON_PRED(u64);
DEFINE_COMPARISON_PRED(s32);
DEFINE_COMPARISON_PRED(u32);
DEFINE_COMPARISON_PRED(s16);
DEFINE_COMPARISON_PRED(u16);
DEFINE_COMPARISON_PRED(s8);
DEFINE_COMPARISON_PRED(u8);

/* Instantiate equality predicates for all integer field widths. */
DEFINE_EQUALITY_PRED(64);
DEFINE_EQUALITY_PRED(32);
DEFINE_EQUALITY_PRED(16);
DEFINE_EQUALITY_PRED(8);
197
/* Filter predicate for fixed sized arrays of characters */
static int filter_pred_string(struct filter_pred *pred, void *event)
{
	char *addr = (char *)(event + pred->offset);
	int cmp, match;

	/* regex.field_len was set to the field's declared size in init_pred() */
	cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len);

	/* pred->not inverts the result for negated filters */
	match = cmp ^ pred->not;

	return match;
}

/* Filter predicate for char * pointers */
static int filter_pred_pchar(struct filter_pred *pred, void *event)
{
	char **addr = (char **)(event + pred->offset);
	int cmp, match;
	int len = strlen(*addr) + 1;	/* including tailing '\0' */

	/* NOTE(review): assumes *addr is a valid NUL-terminated string */
	cmp = pred->regex.match(*addr, &pred->regex, len);

	match = cmp ^ pred->not;

	return match;
}

/*
 * Filter predicate for dynamic sized arrays of characters.
 * These are implemented through a list of strings at the end
 * of the entry.
 * Also each of these strings have a field in the entry which
 * contains its offset from the beginning of the entry.
 * We have then first to get this field, dereference it
 * and add it to the address of the entry, and at last we have
 * the address of the string.
 */
static int filter_pred_strloc(struct filter_pred *pred, void *event)
{
	/* packed descriptor: low 16 bits = offset, high 16 bits = length */
	u32 str_item = *(u32 *)(event + pred->offset);
	int str_loc = str_item & 0xffff;
	int str_len = str_item >> 16;
	char *addr = (char *)(event + str_loc);
	int cmp, match;

	cmp = pred->regex.match(addr, &pred->regex, str_len);

	match = cmp ^ pred->not;

	return match;
}

/* Default predicate for unset slots: never matches. */
static int filter_pred_none(struct filter_pred *pred, void *event)
{
	return 0;
}
254
255 /*
256 * regex_match_foo - Basic regex callbacks
257 *
258 * @str: the string to be searched
259 * @r: the regex structure containing the pattern string
260 * @len: the length of the string to be searched (including '\0')
261 *
262 * Note:
263 * - @str might not be NULL-terminated if it's of type DYN_STRING
264 * or STATIC_STRING
265 */
266
267 static int regex_match_full(char *str, struct regex *r, int len)
268 {
269 if (strncmp(str, r->pattern, len) == 0)
270 return 1;
271 return 0;
272 }
273
274 static int regex_match_front(char *str, struct regex *r, int len)
275 {
276 if (strncmp(str, r->pattern, r->len) == 0)
277 return 1;
278 return 0;
279 }
280
281 static int regex_match_middle(char *str, struct regex *r, int len)
282 {
283 if (strnstr(str, r->pattern, len))
284 return 1;
285 return 0;
286 }
287
288 static int regex_match_end(char *str, struct regex *r, int len)
289 {
290 int strlen = len - 1;
291
292 if (strlen >= r->len &&
293 memcmp(str + strlen - r->len, r->pattern, r->len) == 0)
294 return 1;
295 return 0;
296 }
297
298 /**
299 * filter_parse_regex - parse a basic regex
300 * @buff: the raw regex
301 * @len: length of the regex
302 * @search: will point to the beginning of the string to compare
303 * @not: tell whether the match will have to be inverted
304 *
305 * This passes in a buffer containing a regex and this function will
306 * set search to point to the search part of the buffer and
307 * return the type of search it is (see enum above).
308 * This does modify buff.
309 *
310 * Returns enum type.
311 * search returns the pointer to use for comparison.
312 * not returns 1 if buff started with a '!'
313 * 0 otherwise.
314 */
315 enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
316 {
317 int type = MATCH_FULL;
318 int i;
319
320 if (buff[0] == '!') {
321 *not = 1;
322 buff++;
323 len--;
324 } else
325 *not = 0;
326
327 *search = buff;
328
329 for (i = 0; i < len; i++) {
330 if (buff[i] == '*') {
331 if (!i) {
332 *search = buff + 1;
333 type = MATCH_END_ONLY;
334 } else {
335 if (type == MATCH_END_ONLY)
336 type = MATCH_MIDDLE_ONLY;
337 else
338 type = MATCH_FRONT_ONLY;
339 buff[i] = 0;
340 break;
341 }
342 }
343 }
344
345 return type;
346 }
347
/*
 * Pick the regex callback for @pred based on its pattern.
 *
 * Only the glob operator ('~') actually parses the pattern for '*'
 * wildcards; every other operator keeps MATCH_FULL and compares the
 * pattern verbatim.  A leading '!' in a glob pattern toggles pred->not.
 */
static void filter_build_regex(struct filter_pred *pred)
{
	struct regex *r = &pred->regex;
	char *search;
	enum regex_type type = MATCH_FULL;
	int not = 0;

	if (pred->op == OP_GLOB) {
		type = filter_parse_regex(r->pattern, r->len, &search, &not);
		/* strip any '!' / '*' prefix out of the stored pattern */
		r->len = strlen(search);
		memmove(r->pattern, search, r->len+1);
	}

	switch (type) {
	case MATCH_FULL:
		r->match = regex_match_full;
		break;
	case MATCH_FRONT_ONLY:
		r->match = regex_match_front;
		break;
	case MATCH_MIDDLE_ONLY:
		r->match = regex_match_middle;
		break;
	case MATCH_END_ONLY:
		r->match = regex_match_end;
		break;
	}

	/* '!' in the pattern flips any negation already set (e.g. "!=") */
	pred->not ^= not;
}
378
/* Direction of the current step while walking the predicate tree. */
enum move_type {
	MOVE_DOWN,		/* descending into a child */
	MOVE_UP_FROM_LEFT,	/* returning from the left child */
	MOVE_UP_FROM_RIGHT	/* returning from the right child */
};

/*
 * Step from @pred to its parent in the preds[] array and record, via
 * @move, which side we came up from.  The side is encoded in the
 * FILTER_PRED_IS_RIGHT bit of pred->parent.
 */
static struct filter_pred *
get_pred_parent(struct filter_pred *pred, struct filter_pred *preds,
		int index, enum move_type *move)
{
	if (pred->parent & FILTER_PRED_IS_RIGHT)
		*move = MOVE_UP_FROM_RIGHT;
	else
		*move = MOVE_UP_FROM_LEFT;
	pred = &preds[pred->parent & ~FILTER_PRED_IS_RIGHT];

	return pred;
}
397
/* Callback verdicts for walk_pred_tree(). */
enum walk_return {
	WALK_PRED_ABORT,	/* stop the walk, return cb's *err */
	WALK_PRED_PARENT,	/* skip this subtree, go to the parent */
	WALK_PRED_DEFAULT,	/* continue the normal traversal */
};

typedef int (*filter_pred_walkcb_t) (enum move_type move,
				     struct filter_pred *pred,
				     int *err, void *data);

/*
 * Iteratively traverse the predicate tree rooted at @root (depth-first,
 * left then right), invoking @cb at every step.  No recursion is used:
 * parent links stored in each pred are followed back up, with @move
 * telling the callback which direction the walk is heading.
 *
 * Returns 0 on a complete walk, -EINVAL if @preds is NULL, or the
 * callback's *err when it aborts.
 */
static int walk_pred_tree(struct filter_pred *preds,
			  struct filter_pred *root,
			  filter_pred_walkcb_t cb, void *data)
{
	struct filter_pred *pred = root;
	enum move_type move = MOVE_DOWN;
	int done = 0;

	if  (!preds)
		return -EINVAL;

	do {
		int err = 0, ret;

		ret = cb(move, pred, &err, data);
		if (ret == WALK_PRED_ABORT)
			return err;
		if (ret == WALK_PRED_PARENT)
			goto get_parent;

		switch (move) {
		case MOVE_DOWN:
			/* leaves have left == FILTER_PRED_INVALID */
			if (pred->left != FILTER_PRED_INVALID) {
				pred = &preds[pred->left];
				continue;
			}
			goto get_parent;
		case MOVE_UP_FROM_LEFT:
			pred = &preds[pred->right];
			move = MOVE_DOWN;
			continue;
		case MOVE_UP_FROM_RIGHT:
 get_parent:
			/* back at the root: the walk is finished */
			if (pred == root)
				break;
			pred = get_pred_parent(pred, preds,
					       pred->parent,
					       &move);
			continue;
		}
		done = 1;
	} while (!done);

	/* We are fine. */
	return 0;
}
454
/*
 * A series of AND or ORs where found together. Instead of
 * climbing up and down the tree branches, an array of the
 * ops were made in order of checks. We can just move across
 * the array and short circuit if needed.
 */
static int process_ops(struct filter_pred *preds,
		       struct filter_pred *op, void *rec)
{
	struct filter_pred *pred;
	int match = 0;
	int type;
	int i;

	/*
	 * Micro-optimization: We set type to true if op
	 * is an OR and false otherwise (AND). Then we
	 * just need to test if the match is equal to
	 * the type, and if it is, we can short circuit the
	 * rest of the checks:
	 *
	 *  if ((match && op->op == OP_OR) ||
	 *      (!match && op->op == OP_AND))
	 *	  return match;
	 */
	type = op->op == OP_OR;

	/* op->val holds the number of folded children in op->ops[] */
	for (i = 0; i < op->val; i++) {
		pred = &preds[op->ops[i]];
		if (!WARN_ON_ONCE(!pred->fn))
			match = pred->fn(pred, rec);
		if (!!match == type)
			return match;
	}
	return match;
}
491
/* Per-walk state for filter_match_preds(). */
struct filter_match_preds_data {
	struct filter_pred *preds;	/* flat predicate array */
	int match;			/* running match result */
	void *rec;			/* the event record being filtered */
};

/*
 * walk_pred_tree() callback that evaluates the filter against d->rec.
 * Leaves (and folded op groups) are evaluated directly; AND/OR nodes
 * short-circuit on the way back up from their left child.
 */
static int filter_match_preds_cb(enum move_type move, struct filter_pred *pred,
				 int *err, void *data)
{
	struct filter_match_preds_data *d = data;

	*err = 0;
	switch (move) {
	case MOVE_DOWN:
		/* only AND and OR have children */
		if (pred->left != FILTER_PRED_INVALID) {
			/* If ops is set, then it was folded. */
			if (!pred->ops)
				return WALK_PRED_DEFAULT;
			/* We can treat folded ops as a leaf node */
			d->match = process_ops(d->preds, pred, d->rec);
		} else {
			if (!WARN_ON_ONCE(!pred->fn))
				d->match = pred->fn(pred, d->rec);
		}

		return WALK_PRED_PARENT;
	case MOVE_UP_FROM_LEFT:
		/*
		 * Check for short circuits.
		 *
		 * Optimization: !!match == (pred->op == OP_OR)
		 *   is the same as:
		 * if ((match && pred->op == OP_OR) ||
		 *     (!match && pred->op == OP_AND))
		 */
		if (!!d->match == (pred->op == OP_OR))
			return WALK_PRED_PARENT;
		break;
	case MOVE_UP_FROM_RIGHT:
		break;
	}

	return WALK_PRED_DEFAULT;
}
537
/* return 1 if event matches, 0 otherwise (discard) */
int filter_match_preds(struct event_filter *filter, void *rec)
{
	struct filter_pred *preds;
	struct filter_pred *root;
	struct filter_match_preds_data data = {
		/* match is currently meaningless */
		.match = -1,
		.rec   = rec,
	};
	int n_preds, ret;

	/* no filter is considered a match */
	if (!filter)
		return 1;

	n_preds = filter->n_preds;
	if (!n_preds)
		return 1;

	/*
	 * n_preds, root and filter->preds are protect with preemption disabled.
	 */
	root = rcu_dereference_sched(filter->root);
	if (!root)
		return 1;

	data.preds = preds = rcu_dereference_sched(filter->preds);
	ret = walk_pred_tree(preds, root, filter_match_preds_cb, &data);
	/* the tree was validated at set time; a walk failure is a bug */
	WARN_ON(ret);
	return data.match;
}
EXPORT_SYMBOL_GPL(filter_match_preds);
571
572 static void parse_error(struct filter_parse_state *ps, int err, int pos)
573 {
574 ps->lasterr = err;
575 ps->lasterr_pos = pos;
576 }
577
578 static void remove_filter_string(struct event_filter *filter)
579 {
580 if (!filter)
581 return;
582
583 kfree(filter->filter_string);
584 filter->filter_string = NULL;
585 }
586
587 static int replace_filter_string(struct event_filter *filter,
588 char *filter_string)
589 {
590 kfree(filter->filter_string);
591 filter->filter_string = kstrdup(filter_string, GFP_KERNEL);
592 if (!filter->filter_string)
593 return -ENOMEM;
594
595 return 0;
596 }
597
598 static int append_filter_string(struct event_filter *filter,
599 char *string)
600 {
601 int newlen;
602 char *new_filter_string;
603
604 BUG_ON(!filter->filter_string);
605 newlen = strlen(filter->filter_string) + strlen(string) + 1;
606 new_filter_string = kmalloc(newlen, GFP_KERNEL);
607 if (!new_filter_string)
608 return -ENOMEM;
609
610 strcpy(new_filter_string, filter->filter_string);
611 strcat(new_filter_string, string);
612 kfree(filter->filter_string);
613 filter->filter_string = new_filter_string;
614
615 return 0;
616 }
617
/*
 * Append a two-line error report to @filter's saved string: a '^' caret
 * under the offending position, then "parse_error: <message>".
 *
 * Best effort: if the scratch page cannot be allocated nothing is
 * appended.  NOTE(review): the append_filter_string() return values are
 * ignored here, so an OOM mid-report can leave a partial message.
 */
static void append_filter_err(struct filter_parse_state *ps,
			      struct event_filter *filter)
{
	int pos = ps->lasterr_pos;
	char *buf, *pbuf;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return;

	append_filter_string(filter, "\n");
	memset(buf, ' ', PAGE_SIZE);
	/* keep the caret line (plus message) within one page */
	if (pos > PAGE_SIZE - 128)
		pos = 0;
	buf[pos] = '^';
	pbuf = &buf[pos] + 1;

	sprintf(pbuf, "\nparse_error: %s\n", err_text[ps->lasterr]);
	append_filter_string(filter, buf);
	free_page((unsigned long) buf);
}
639
640 void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
641 {
642 struct event_filter *filter;
643
644 mutex_lock(&event_mutex);
645 filter = call->filter;
646 if (filter && filter->filter_string)
647 trace_seq_printf(s, "%s\n", filter->filter_string);
648 else
649 trace_seq_printf(s, "none\n");
650 mutex_unlock(&event_mutex);
651 }
652
653 void print_subsystem_event_filter(struct event_subsystem *system,
654 struct trace_seq *s)
655 {
656 struct event_filter *filter;
657
658 mutex_lock(&event_mutex);
659 filter = system->filter;
660 if (filter && filter->filter_string)
661 trace_seq_printf(s, "%s\n", filter->filter_string);
662 else
663 trace_seq_printf(s, DEFAULT_SYS_FILTER_MESSAGE "\n");
664 mutex_unlock(&event_mutex);
665 }
666
667 static int __alloc_pred_stack(struct pred_stack *stack, int n_preds)
668 {
669 stack->preds = kcalloc(n_preds + 1, sizeof(*stack->preds), GFP_KERNEL);
670 if (!stack->preds)
671 return -ENOMEM;
672 stack->index = n_preds;
673 return 0;
674 }
675
676 static void __free_pred_stack(struct pred_stack *stack)
677 {
678 kfree(stack->preds);
679 stack->index = 0;
680 }
681
682 static int __push_pred_stack(struct pred_stack *stack,
683 struct filter_pred *pred)
684 {
685 int index = stack->index;
686
687 if (WARN_ON(index == 0))
688 return -ENOSPC;
689
690 stack->preds[--index] = pred;
691 stack->index = index;
692 return 0;
693 }
694
695 static struct filter_pred *
696 __pop_pred_stack(struct pred_stack *stack)
697 {
698 struct filter_pred *pred;
699 int index = stack->index;
700
701 pred = stack->preds[index++];
702 if (!pred)
703 return NULL;
704
705 stack->index = index;
706 return pred;
707 }
708
/*
 * Copy @src into slot @idx of filter->preds and wire it into the tree.
 *
 * For AND/OR nodes the two children are popped off @stack and linked as
 * left/right; leaf nodes get left = FILTER_PRED_INVALID as a marker.
 * The FILTER_PRED_FOLD bit tracks whether a node's subtree can later be
 * flattened into a linear ops[] array for faster evaluation.
 *
 * The new node is pushed back on @stack.  Returns 0 on success,
 * -EINVAL if the stack did not hold the expected two children.
 */
static int filter_set_pred(struct event_filter *filter,
			   int idx,
			   struct pred_stack *stack,
			   struct filter_pred *src)
{
	struct filter_pred *dest = &filter->preds[idx];
	struct filter_pred *left;
	struct filter_pred *right;

	*dest = *src;
	dest->index = idx;

	if (dest->op == OP_OR || dest->op == OP_AND) {
		right = __pop_pred_stack(stack);
		left = __pop_pred_stack(stack);
		if (!left || !right)
			return -EINVAL;
		/*
		 * If both children can be folded
		 * and they are the same op as this op or a leaf,
		 * then this op can be folded.
		 */
		if (left->index & FILTER_PRED_FOLD &&
		    (left->op == dest->op ||
		     left->left == FILTER_PRED_INVALID) &&
		    right->index & FILTER_PRED_FOLD &&
		    (right->op == dest->op ||
		     right->left == FILTER_PRED_INVALID))
			dest->index |= FILTER_PRED_FOLD;

		/* strip the FOLD bit from the stored child/parent indices */
		dest->left = left->index & ~FILTER_PRED_FOLD;
		dest->right = right->index & ~FILTER_PRED_FOLD;
		left->parent = dest->index & ~FILTER_PRED_FOLD;
		right->parent = dest->index | FILTER_PRED_IS_RIGHT;
	} else {
		/*
		 * Make dest->left invalid to be used as a quick
		 * way to know this is a leaf node.
		 */
		dest->left = FILTER_PRED_INVALID;

		/* All leafs allow folding the parent ops. */
		dest->index |= FILTER_PRED_FOLD;
	}

	return __push_pred_stack(stack, dest);
}
756
757 static void __free_preds(struct event_filter *filter)
758 {
759 int i;
760
761 if (filter->preds) {
762 for (i = 0; i < filter->n_preds; i++)
763 kfree(filter->preds[i].ops);
764 kfree(filter->preds);
765 filter->preds = NULL;
766 }
767 filter->a_preds = 0;
768 filter->n_preds = 0;
769 }
770
771 static void filter_disable(struct ftrace_event_call *call)
772 {
773 call->flags &= ~TRACE_EVENT_FL_FILTERED;
774 }
775
776 static void __free_filter(struct event_filter *filter)
777 {
778 if (!filter)
779 return;
780
781 __free_preds(filter);
782 kfree(filter->filter_string);
783 kfree(filter);
784 }
785
786 /*
787 * Called when destroying the ftrace_event_call.
788 * The call is being freed, so we do not need to worry about
789 * the call being currently used. This is for module code removing
790 * the tracepoints from within it.
791 */
792 void destroy_preds(struct ftrace_event_call *call)
793 {
794 __free_filter(call->filter);
795 call->filter = NULL;
796 }
797
798 static struct event_filter *__alloc_filter(void)
799 {
800 struct event_filter *filter;
801
802 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
803 return filter;
804 }
805
806 static int __alloc_preds(struct event_filter *filter, int n_preds)
807 {
808 struct filter_pred *pred;
809 int i;
810
811 if (filter->preds)
812 __free_preds(filter);
813
814 filter->preds = kcalloc(n_preds, sizeof(*filter->preds), GFP_KERNEL);
815
816 if (!filter->preds)
817 return -ENOMEM;
818
819 filter->a_preds = n_preds;
820 filter->n_preds = 0;
821
822 for (i = 0; i < n_preds; i++) {
823 pred = &filter->preds[i];
824 pred->fn = filter_pred_none;
825 }
826
827 return 0;
828 }
829
830 static void filter_free_subsystem_preds(struct event_subsystem *system)
831 {
832 struct ftrace_event_call *call;
833
834 list_for_each_entry(call, &ftrace_events, list) {
835 if (strcmp(call->class->system, system->name) != 0)
836 continue;
837
838 filter_disable(call);
839 remove_filter_string(call->filter);
840 }
841 }
842
843 static void filter_free_subsystem_filters(struct event_subsystem *system)
844 {
845 struct ftrace_event_call *call;
846
847 list_for_each_entry(call, &ftrace_events, list) {
848 if (strcmp(call->class->system, system->name) != 0)
849 continue;
850 __free_filter(call->filter);
851 call->filter = NULL;
852 }
853 }
854
855 static int filter_add_pred(struct filter_parse_state *ps,
856 struct event_filter *filter,
857 struct filter_pred *pred,
858 struct pred_stack *stack)
859 {
860 int err;
861
862 if (WARN_ON(filter->n_preds == filter->a_preds)) {
863 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
864 return -ENOSPC;
865 }
866
867 err = filter_set_pred(filter, filter->n_preds, stack, pred);
868 if (err)
869 return err;
870
871 filter->n_preds++;
872
873 return 0;
874 }
875
876 int filter_assign_type(const char *type)
877 {
878 if (strstr(type, "__data_loc") && strstr(type, "char"))
879 return FILTER_DYN_STRING;
880
881 if (strchr(type, '[') && strstr(type, "char"))
882 return FILTER_STATIC_STRING;
883
884 return FILTER_OTHER;
885 }
886
887 static bool is_function_field(struct ftrace_event_field *field)
888 {
889 return field->filter_type == FILTER_TRACE_FN;
890 }
891
892 static bool is_string_field(struct ftrace_event_field *field)
893 {
894 return field->filter_type == FILTER_DYN_STRING ||
895 field->filter_type == FILTER_STATIC_STRING ||
896 field->filter_type == FILTER_PTR_STRING;
897 }
898
899 static int is_legal_op(struct ftrace_event_field *field, int op)
900 {
901 if (is_string_field(field) &&
902 (op != OP_EQ && op != OP_NE && op != OP_GLOB))
903 return 0;
904 if (!is_string_field(field) && op == OP_GLOB)
905 return 0;
906
907 return 1;
908 }
909
910 static filter_pred_fn_t select_comparison_fn(int op, int field_size,
911 int field_is_signed)
912 {
913 filter_pred_fn_t fn = NULL;
914
915 switch (field_size) {
916 case 8:
917 if (op == OP_EQ || op == OP_NE)
918 fn = filter_pred_64;
919 else if (field_is_signed)
920 fn = filter_pred_s64;
921 else
922 fn = filter_pred_u64;
923 break;
924 case 4:
925 if (op == OP_EQ || op == OP_NE)
926 fn = filter_pred_32;
927 else if (field_is_signed)
928 fn = filter_pred_s32;
929 else
930 fn = filter_pred_u32;
931 break;
932 case 2:
933 if (op == OP_EQ || op == OP_NE)
934 fn = filter_pred_16;
935 else if (field_is_signed)
936 fn = filter_pred_s16;
937 else
938 fn = filter_pred_u16;
939 break;
940 case 1:
941 if (op == OP_EQ || op == OP_NE)
942 fn = filter_pred_8;
943 else if (field_is_signed)
944 fn = filter_pred_s8;
945 else
946 fn = filter_pred_u8;
947 break;
948 }
949
950 return fn;
951 }
952
/*
 * Finish setting up @pred for @field: validate the operator, build the
 * regex or parse the integer value, and select the match function.
 *
 * Returns 0 on success or -EINVAL with a parse error recorded in @ps.
 */
static int init_pred(struct filter_parse_state *ps,
		     struct ftrace_event_field *field,
		     struct filter_pred *pred)

{
	filter_pred_fn_t fn = filter_pred_none;
	unsigned long long val;
	int ret;

	pred->offset = field->offset;

	if (!is_legal_op(field, pred->op)) {
		parse_error(ps, FILT_ERR_ILLEGAL_FIELD_OP, 0);
		return -EINVAL;
	}

	if (is_string_field(field)) {
		filter_build_regex(pred);

		if (field->filter_type == FILTER_STATIC_STRING) {
			fn = filter_pred_string;
			pred->regex.field_len = field->size;
		} else if (field->filter_type == FILTER_DYN_STRING)
			fn = filter_pred_strloc;
		else
			fn = filter_pred_pchar;
	} else if (is_function_field(field)) {
		/* function tracing only supports filtering on "ip" */
		if (strcmp(field->name, "ip")) {
			parse_error(ps, FILT_ERR_IP_FIELD_ONLY, 0);
			return -EINVAL;
		}
	} else {
		/* integer field: the operand must parse as a number */
		if (field->is_signed)
			ret = kstrtoll(pred->regex.pattern, 0, &val);
		else
			ret = kstrtoull(pred->regex.pattern, 0, &val);
		if (ret) {
			parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
			return -EINVAL;
		}
		pred->val = val;

		fn = select_comparison_fn(pred->op, field->size,
					  field->is_signed);
		if (!fn) {
			parse_error(ps, FILT_ERR_INVALID_OP, 0);
			return -EINVAL;
		}
	}

	if (pred->op == OP_NE)
		pred->not = 1;

	pred->fn = fn;
	return 0;
}
1009
1010 static void parse_init(struct filter_parse_state *ps,
1011 struct filter_op *ops,
1012 char *infix_string)
1013 {
1014 memset(ps, '\0', sizeof(*ps));
1015
1016 ps->infix.string = infix_string;
1017 ps->infix.cnt = strlen(infix_string);
1018 ps->ops = ops;
1019
1020 INIT_LIST_HEAD(&ps->opstack);
1021 INIT_LIST_HEAD(&ps->postfix);
1022 }
1023
1024 static char infix_next(struct filter_parse_state *ps)
1025 {
1026 ps->infix.cnt--;
1027
1028 return ps->infix.string[ps->infix.tail++];
1029 }
1030
1031 static char infix_peek(struct filter_parse_state *ps)
1032 {
1033 if (ps->infix.tail == strlen(ps->infix.string))
1034 return 0;
1035
1036 return ps->infix.string[ps->infix.tail];
1037 }
1038
1039 static void infix_advance(struct filter_parse_state *ps)
1040 {
1041 ps->infix.cnt--;
1042 ps->infix.tail++;
1043 }
1044
1045 static inline int is_precedence_lower(struct filter_parse_state *ps,
1046 int a, int b)
1047 {
1048 return ps->ops[a].precedence < ps->ops[b].precedence;
1049 }
1050
1051 static inline int is_op_char(struct filter_parse_state *ps, char c)
1052 {
1053 int i;
1054
1055 for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1056 if (ps->ops[i].string[0] == c)
1057 return 1;
1058 }
1059
1060 return 0;
1061 }
1062
1063 static int infix_get_op(struct filter_parse_state *ps, char firstc)
1064 {
1065 char nextc = infix_peek(ps);
1066 char opstr[3];
1067 int i;
1068
1069 opstr[0] = firstc;
1070 opstr[1] = nextc;
1071 opstr[2] = '\0';
1072
1073 for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1074 if (!strcmp(opstr, ps->ops[i].string)) {
1075 infix_advance(ps);
1076 return ps->ops[i].id;
1077 }
1078 }
1079
1080 opstr[1] = '\0';
1081
1082 for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1083 if (!strcmp(opstr, ps->ops[i].string))
1084 return ps->ops[i].id;
1085 }
1086
1087 return OP_NONE;
1088 }
1089
1090 static inline void clear_operand_string(struct filter_parse_state *ps)
1091 {
1092 memset(ps->operand.string, '\0', MAX_FILTER_STR_VAL);
1093 ps->operand.tail = 0;
1094 }
1095
1096 static inline int append_operand_char(struct filter_parse_state *ps, char c)
1097 {
1098 if (ps->operand.tail == MAX_FILTER_STR_VAL - 1)
1099 return -EINVAL;
1100
1101 ps->operand.string[ps->operand.tail++] = c;
1102
1103 return 0;
1104 }
1105
1106 static int filter_opstack_push(struct filter_parse_state *ps, int op)
1107 {
1108 struct opstack_op *opstack_op;
1109
1110 opstack_op = kmalloc(sizeof(*opstack_op), GFP_KERNEL);
1111 if (!opstack_op)
1112 return -ENOMEM;
1113
1114 opstack_op->op = op;
1115 list_add(&opstack_op->list, &ps->opstack);
1116
1117 return 0;
1118 }
1119
1120 static int filter_opstack_empty(struct filter_parse_state *ps)
1121 {
1122 return list_empty(&ps->opstack);
1123 }
1124
1125 static int filter_opstack_top(struct filter_parse_state *ps)
1126 {
1127 struct opstack_op *opstack_op;
1128
1129 if (filter_opstack_empty(ps))
1130 return OP_NONE;
1131
1132 opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
1133
1134 return opstack_op->op;
1135 }
1136
1137 static int filter_opstack_pop(struct filter_parse_state *ps)
1138 {
1139 struct opstack_op *opstack_op;
1140 int op;
1141
1142 if (filter_opstack_empty(ps))
1143 return OP_NONE;
1144
1145 opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
1146 op = opstack_op->op;
1147 list_del(&opstack_op->list);
1148
1149 kfree(opstack_op);
1150
1151 return op;
1152 }
1153
1154 static void filter_opstack_clear(struct filter_parse_state *ps)
1155 {
1156 while (!filter_opstack_empty(ps))
1157 filter_opstack_pop(ps);
1158 }
1159
1160 static char *curr_operand(struct filter_parse_state *ps)
1161 {
1162 return ps->operand.string;
1163 }
1164
1165 static int postfix_append_operand(struct filter_parse_state *ps, char *operand)
1166 {
1167 struct postfix_elt *elt;
1168
1169 elt = kmalloc(sizeof(*elt), GFP_KERNEL);
1170 if (!elt)
1171 return -ENOMEM;
1172
1173 elt->op = OP_NONE;
1174 elt->operand = kstrdup(operand, GFP_KERNEL);
1175 if (!elt->operand) {
1176 kfree(elt);
1177 return -ENOMEM;
1178 }
1179
1180 list_add_tail(&elt->list, &ps->postfix);
1181
1182 return 0;
1183 }
1184
1185 static int postfix_append_op(struct filter_parse_state *ps, int op)
1186 {
1187 struct postfix_elt *elt;
1188
1189 elt = kmalloc(sizeof(*elt), GFP_KERNEL);
1190 if (!elt)
1191 return -ENOMEM;
1192
1193 elt->op = op;
1194 elt->operand = NULL;
1195
1196 list_add_tail(&elt->list, &ps->postfix);
1197
1198 return 0;
1199 }
1200
1201 static void postfix_clear(struct filter_parse_state *ps)
1202 {
1203 struct postfix_elt *elt;
1204
1205 while (!list_empty(&ps->postfix)) {
1206 elt = list_first_entry(&ps->postfix, struct postfix_elt, list);
1207 list_del(&elt->list);
1208 kfree(elt->operand);
1209 kfree(elt);
1210 }
1211 }
1212
/*
 * Convert the infix filter expression in @ps to postfix (RPN) form
 * using the shunting-yard algorithm: operands go straight to the
 * postfix list, operators are staged on the op stack and emitted in
 * precedence order, and parentheses bracket sub-expressions.  Text
 * inside double quotes is taken verbatim as operand characters.
 *
 * Returns 0 on success or -EINVAL with a parse error recorded in @ps.
 */
static int filter_parse(struct filter_parse_state *ps)
{
	int in_string = 0;
	int op, top_op;
	char ch;

	while ((ch = infix_next(ps))) {
		/* '"' toggles quoted mode; the quote itself is dropped */
		if (ch == '"') {
			in_string ^= 1;
			continue;
		}

		if (in_string)
			goto parse_operand;

		if (isspace(ch))
			continue;

		if (is_op_char(ps, ch)) {
			op = infix_get_op(ps, ch);
			if (op == OP_NONE) {
				parse_error(ps, FILT_ERR_INVALID_OP, 0);
				return -EINVAL;
			}

			/* flush the operand accumulated so far */
			if (strlen(curr_operand(ps))) {
				postfix_append_operand(ps, curr_operand(ps));
				clear_operand_string(ps);
			}

			/* emit stacked ops of equal/higher precedence */
			while (!filter_opstack_empty(ps)) {
				top_op = filter_opstack_top(ps);
				if (!is_precedence_lower(ps, top_op, op)) {
					top_op = filter_opstack_pop(ps);
					postfix_append_op(ps, top_op);
					continue;
				}
				break;
			}

			filter_opstack_push(ps, op);
			continue;
		}

		if (ch == '(') {
			filter_opstack_push(ps, OP_OPEN_PAREN);
			continue;
		}

		if (ch == ')') {
			if (strlen(curr_operand(ps))) {
				postfix_append_operand(ps, curr_operand(ps));
				clear_operand_string(ps);
			}

			/* emit ops down to the matching open paren */
			top_op = filter_opstack_pop(ps);
			while (top_op != OP_NONE) {
				if (top_op == OP_OPEN_PAREN)
					break;
				postfix_append_op(ps, top_op);
				top_op = filter_opstack_pop(ps);
			}
			if (top_op == OP_NONE) {
				parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
				return -EINVAL;
			}
			continue;
		}
parse_operand:
		if (append_operand_char(ps, ch)) {
			parse_error(ps, FILT_ERR_OPERAND_TOO_LONG, 0);
			return -EINVAL;
		}
	}

	/* flush the final operand and drain the operator stack */
	if (strlen(curr_operand(ps)))
		postfix_append_operand(ps, curr_operand(ps));

	while (!filter_opstack_empty(ps)) {
		top_op = filter_opstack_pop(ps);
		if (top_op == OP_NONE)
			break;
		if (top_op == OP_OPEN_PAREN) {
			parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
			return -EINVAL;
		}
		postfix_append_op(ps, top_op);
	}

	return 0;
}
1304
/*
 * Build a predicate for one postfix element.  AND/OR nodes need no
 * operands; comparison nodes look up @operand1 as the event field and
 * take @operand2 as the value/pattern.
 *
 * Returns a pointer to a single static scratch pred (the caller copies
 * it via filter_set_pred() before the next call), or NULL with a parse
 * error recorded.  NOTE(review): the static pred implies callers are
 * serialized — presumably by event_mutex; confirm against callers.
 */
static struct filter_pred *create_pred(struct filter_parse_state *ps,
				       struct ftrace_event_call *call,
				       int op, char *operand1, char *operand2)
{
	struct ftrace_event_field *field;
	static struct filter_pred pred;

	memset(&pred, 0, sizeof(pred));
	pred.op = op;

	if (op == OP_AND || op == OP_OR)
		return &pred;

	if (!operand1 || !operand2) {
		parse_error(ps, FILT_ERR_MISSING_FIELD, 0);
		return NULL;
	}

	field = trace_find_event_field(call, operand1);
	if (!field) {
		parse_error(ps, FILT_ERR_FIELD_NOT_FOUND, 0);
		return NULL;
	}

	strcpy(pred.regex.pattern, operand2);
	pred.regex.len = strlen(pred.regex.pattern);
	pred.field = field;
	return init_pred(ps, field, &pred) ? NULL : &pred;
}
1334
1335 static int check_preds(struct filter_parse_state *ps)
1336 {
1337 int n_normal_preds = 0, n_logical_preds = 0;
1338 struct postfix_elt *elt;
1339
1340 list_for_each_entry(elt, &ps->postfix, list) {
1341 if (elt->op == OP_NONE)
1342 continue;
1343
1344 if (elt->op == OP_AND || elt->op == OP_OR) {
1345 n_logical_preds++;
1346 continue;
1347 }
1348 n_normal_preds++;
1349 }
1350
1351 if (!n_normal_preds || n_logical_preds >= n_normal_preds) {
1352 parse_error(ps, FILT_ERR_INVALID_FILTER, 0);
1353 return -EINVAL;
1354 }
1355
1356 return 0;
1357 }
1358
1359 static int count_preds(struct filter_parse_state *ps)
1360 {
1361 struct postfix_elt *elt;
1362 int n_preds = 0;
1363
1364 list_for_each_entry(elt, &ps->postfix, list) {
1365 if (elt->op == OP_NONE)
1366 continue;
1367 n_preds++;
1368 }
1369
1370 return n_preds;
1371 }
1372
/* Bookkeeping for check_pred_tree_cb(): abort once too many nodes are seen. */
struct check_pred_data {
	int count;	/* nodes visited so far */
	int max;	/* upper bound before the walk is declared broken */
};
1377
1378 static int check_pred_tree_cb(enum move_type move, struct filter_pred *pred,
1379 int *err, void *data)
1380 {
1381 struct check_pred_data *d = data;
1382
1383 if (WARN_ON(d->count++ > d->max)) {
1384 *err = -EINVAL;
1385 return WALK_PRED_ABORT;
1386 }
1387 return WALK_PRED_DEFAULT;
1388 }
1389
/*
 * The tree is walked at filtering of an event. If the tree is not correctly
 * built, it may cause an infinite loop. Check here that the tree does
 * indeed terminate.
 */
static int check_pred_tree(struct event_filter *filter,
			   struct filter_pred *root)
{
	struct check_pred_data data = {
		/*
		 * The max that we can hit a node is three times.
		 * Once going down, once coming up from left, and
		 * once coming up from right. This is more than enough
		 * since leafs are only hit a single time.
		 */
		.max = 3 * filter->n_preds,
		.count = 0,
	};

	/* 0 on success; check_pred_tree_cb() sets -EINVAL on overrun. */
	return walk_pred_tree(filter->preds, root,
			      check_pred_tree_cb, &data);
}
1412
1413 static int count_leafs_cb(enum move_type move, struct filter_pred *pred,
1414 int *err, void *data)
1415 {
1416 int *count = data;
1417
1418 if ((move == MOVE_DOWN) &&
1419 (pred->left == FILTER_PRED_INVALID))
1420 (*count)++;
1421
1422 return WALK_PRED_DEFAULT;
1423 }
1424
1425 static int count_leafs(struct filter_pred *preds, struct filter_pred *root)
1426 {
1427 int count = 0, ret;
1428
1429 ret = walk_pred_tree(preds, root, count_leafs_cb, &count);
1430 WARN_ON(ret);
1431 return count;
1432 }
1433
/* State shared with fold_pred_cb() while flattening one subtree. */
struct fold_pred_data {
	struct filter_pred *root;	/* node whose ops[] is being filled */
	int count;			/* leaves recorded so far */
	int children;			/* expected leaf count from count_leafs() */
};
1439
1440 static int fold_pred_cb(enum move_type move, struct filter_pred *pred,
1441 int *err, void *data)
1442 {
1443 struct fold_pred_data *d = data;
1444 struct filter_pred *root = d->root;
1445
1446 if (move != MOVE_DOWN)
1447 return WALK_PRED_DEFAULT;
1448 if (pred->left != FILTER_PRED_INVALID)
1449 return WALK_PRED_DEFAULT;
1450
1451 if (WARN_ON(d->count == d->children)) {
1452 *err = -EINVAL;
1453 return WALK_PRED_ABORT;
1454 }
1455
1456 pred->index &= ~FILTER_PRED_FOLD;
1457 root->ops[d->count++] = pred->index;
1458 return WALK_PRED_DEFAULT;
1459 }
1460
/*
 * Flatten the leaves below @root into root->ops[] so the whole batch of
 * same-operator comparisons can be evaluated in one pass.  Returns 0 on
 * success, -ENOMEM if the ops array cannot be allocated, or the error
 * propagated by the walk.
 */
static int fold_pred(struct filter_pred *preds, struct filter_pred *root)
{
	struct fold_pred_data data = {
		.root = root,
		.count = 0,
	};
	int children;

	/* No need to keep the fold flag */
	root->index &= ~FILTER_PRED_FOLD;

	/* If the root is a leaf then do nothing */
	if (root->left == FILTER_PRED_INVALID)
		return 0;

	/* count the children */
	children = count_leafs(preds, &preds[root->left]);
	children += count_leafs(preds, &preds[root->right]);

	root->ops = kcalloc(children, sizeof(*root->ops), GFP_KERNEL);
	if (!root->ops)
		return 0 - ENOMEM == -ENOMEM ? -ENOMEM : -ENOMEM; /* NOTE(review): plain -ENOMEM */

	/* val records how many batched children ops[] holds */
	root->val = children;
	data.children = children;
	return walk_pred_tree(preds, root, fold_pred_cb, &data);
}
1488
/* Walk callback: fold every subtree whose root carries FILTER_PRED_FOLD. */
static int fold_pred_tree_cb(enum move_type move, struct filter_pred *pred,
			     int *err, void *data)
{
	struct filter_pred *preds = data;

	if (move != MOVE_DOWN)
		return WALK_PRED_DEFAULT;
	if (!(pred->index & FILTER_PRED_FOLD))
		return WALK_PRED_DEFAULT;

	*err = fold_pred(preds, pred);
	if (*err)
		return WALK_PRED_ABORT;

	/* everything below is folded, continue with parent */
	return WALK_PRED_PARENT;
}
1506
/*
 * To optimize the processing of the ops, if we have several "ors" or
 * "ands" together, we can put them in an array and process them all
 * together speeding up the filter logic.
 */
static int fold_pred_tree(struct event_filter *filter,
			  struct filter_pred *root)
{
	/* preds doubles as the walk's private data for fold_pred_tree_cb(). */
	return walk_pred_tree(filter->preds, root, fold_pred_tree_cb,
			      filter->preds);
}
1518
1519 static int replace_preds(struct ftrace_event_call *call,
1520 struct event_filter *filter,
1521 struct filter_parse_state *ps,
1522 char *filter_string,
1523 bool dry_run)
1524 {
1525 char *operand1 = NULL, *operand2 = NULL;
1526 struct filter_pred *pred;
1527 struct filter_pred *root;
1528 struct postfix_elt *elt;
1529 struct pred_stack stack = { }; /* init to NULL */
1530 int err;
1531 int n_preds = 0;
1532
1533 n_preds = count_preds(ps);
1534 if (n_preds >= MAX_FILTER_PRED) {
1535 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1536 return -ENOSPC;
1537 }
1538
1539 err = check_preds(ps);
1540 if (err)
1541 return err;
1542
1543 if (!dry_run) {
1544 err = __alloc_pred_stack(&stack, n_preds);
1545 if (err)
1546 return err;
1547 err = __alloc_preds(filter, n_preds);
1548 if (err)
1549 goto fail;
1550 }
1551
1552 n_preds = 0;
1553 list_for_each_entry(elt, &ps->postfix, list) {
1554 if (elt->op == OP_NONE) {
1555 if (!operand1)
1556 operand1 = elt->operand;
1557 else if (!operand2)
1558 operand2 = elt->operand;
1559 else {
1560 parse_error(ps, FILT_ERR_TOO_MANY_OPERANDS, 0);
1561 err = -EINVAL;
1562 goto fail;
1563 }
1564 continue;
1565 }
1566
1567 if (WARN_ON(n_preds++ == MAX_FILTER_PRED)) {
1568 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1569 err = -ENOSPC;
1570 goto fail;
1571 }
1572
1573 pred = create_pred(ps, call, elt->op, operand1, operand2);
1574 if (!pred) {
1575 err = -EINVAL;
1576 goto fail;
1577 }
1578
1579 if (!dry_run) {
1580 err = filter_add_pred(ps, filter, pred, &stack);
1581 if (err)
1582 goto fail;
1583 }
1584
1585 operand1 = operand2 = NULL;
1586 }
1587
1588 if (!dry_run) {
1589 /* We should have one item left on the stack */
1590 pred = __pop_pred_stack(&stack);
1591 if (!pred)
1592 return -EINVAL;
1593 /* This item is where we start from in matching */
1594 root = pred;
1595 /* Make sure the stack is empty */
1596 pred = __pop_pred_stack(&stack);
1597 if (WARN_ON(pred)) {
1598 err = -EINVAL;
1599 filter->root = NULL;
1600 goto fail;
1601 }
1602 err = check_pred_tree(filter, root);
1603 if (err)
1604 goto fail;
1605
1606 /* Optimize the tree */
1607 err = fold_pred_tree(filter, root);
1608 if (err)
1609 goto fail;
1610
1611 /* We don't set root until we know it works */
1612 barrier();
1613 filter->root = root;
1614 }
1615
1616 err = 0;
1617 fail:
1618 __free_pred_stack(&stack);
1619 return err;
1620 }
1621
/* One per-event filter queued up while replacing a whole subsystem. */
struct filter_list {
	struct list_head list;
	struct event_filter *filter;
};
1626
/*
 * Apply @filter_string to every event in @system.  First pass is a dry
 * run that marks events the filter cannot apply to; second pass builds
 * a fresh filter per applicable event and RCU-swaps it in.  The old
 * filters are freed only after synchronize_sched(), since readers
 * dereference call->filter without locks.  Returns 0 if at least one
 * event accepted the filter, -EINVAL if none did, -ENOMEM on OOM.
 */
static int replace_system_preds(struct event_subsystem *system,
				struct filter_parse_state *ps,
				char *filter_string)
{
	struct ftrace_event_call *call;
	struct filter_list *filter_item;
	struct filter_list *tmp;
	LIST_HEAD(filter_list);
	bool fail = true;
	int err;

	list_for_each_entry(call, &ftrace_events, list) {

		if (strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * Try to see if the filter can be applied
		 * (filter arg is ignored on dry_run)
		 */
		err = replace_preds(call, NULL, ps, filter_string, true);
		if (err)
			call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
		else
			call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
	}

	list_for_each_entry(call, &ftrace_events, list) {
		struct event_filter *filter;

		if (strcmp(call->class->system, system->name) != 0)
			continue;

		if (call->flags & TRACE_EVENT_FL_NO_SET_FILTER)
			continue;

		filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
		if (!filter_item)
			goto fail_mem;

		list_add_tail(&filter_item->list, &filter_list);

		filter_item->filter = __alloc_filter();
		if (!filter_item->filter)
			goto fail_mem;
		filter = filter_item->filter;

		/* Can only fail on no memory */
		err = replace_filter_string(filter, filter_string);
		if (err)
			goto fail_mem;

		err = replace_preds(call, filter, ps, filter_string, false);
		if (err) {
			filter_disable(call);
			parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
			append_filter_err(ps, filter);
		} else
			call->flags |= TRACE_EVENT_FL_FILTERED;
		/*
		 * Regardless of if this returned an error, we still
		 * replace the filter for the call.
		 */
		filter = call->filter;
		rcu_assign_pointer(call->filter, filter_item->filter);
		filter_item->filter = filter;

		fail = false;
	}

	if (fail)
		goto fail;

	/*
	 * The calls can still be using the old filters.
	 * Do a synchronize_sched() to ensure all calls are
	 * done with them before we free them.
	 */
	synchronize_sched();
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		__free_filter(filter_item->filter);
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	return 0;
 fail:
	/* No call succeeded */
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
	return -EINVAL;
 fail_mem:
	/* If any call succeeded, we still need to sync */
	if (!fail)
		synchronize_sched();
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		__free_filter(filter_item->filter);
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	return -ENOMEM;
}
1731
/*
 * Allocate a filter and a parse state, then run the parser over
 * @filter_str.  On success the caller owns *@psp and *@filterp;
 * create_filter_finish() releases the parse state.  With @set_str, the
 * string (and any parse error text) is recorded in the filter.
 */
static int create_filter_start(char *filter_str, bool set_str,
			       struct filter_parse_state **psp,
			       struct event_filter **filterp)
{
	struct event_filter *filter;
	struct filter_parse_state *ps = NULL;
	int err = 0;

	WARN_ON_ONCE(*psp || *filterp);

	/* allocate everything, and if any fails, free all and fail */
	filter = __alloc_filter();
	if (filter && set_str)
		err = replace_filter_string(filter, filter_str);

	ps = kzalloc(sizeof(*ps), GFP_KERNEL);

	/* replace_filter_string() can only fail on no memory */
	if (!filter || !ps || err) {
		kfree(ps);
		__free_filter(filter);
		return -ENOMEM;
	}

	/* we're committed to creating a new filter */
	*filterp = filter;
	*psp = ps;

	parse_init(ps, filter_ops, filter_str);
	err = filter_parse(ps);
	if (err && set_str)
		append_filter_err(ps, filter);
	return err;
}
1765
/* Tear down a parse state obtained from create_filter_start(). */
static void create_filter_finish(struct filter_parse_state *ps)
{
	if (!ps)
		return;

	filter_opstack_clear(ps);
	postfix_clear(ps);
	kfree(ps);
}
1774
/**
 * create_filter - create a filter for a ftrace_event_call
 * @call: ftrace_event_call to create a filter for
 * @filter_str: filter string
 * @set_str: remember @filter_str and enable detailed error in filter
 * @filterp: out param for created filter (always updated on return)
 *
 * Creates a filter for @call with @filter_str.  If @set_str is %true,
 * @filter_str is copied and recorded in the new filter.
 *
 * On success, returns 0 and *@filterp points to the new filter.  On
 * failure, returns -errno and *@filterp may point to %NULL or to a new
 * filter.  In the latter case, the returned filter contains error
 * information if @set_str is %true and the caller is responsible for
 * freeing it.
 */
static int create_filter(struct ftrace_event_call *call,
			 char *filter_str, bool set_str,
			 struct event_filter **filterp)
{
	struct event_filter *filter = NULL;
	struct filter_parse_state *ps = NULL;
	int err;

	err = create_filter_start(filter_str, set_str, &ps, &filter);
	if (!err) {
		/* parse succeeded; now build the predicate tree */
		err = replace_preds(call, filter, ps, filter_str, false);
		if (err && set_str)
			append_filter_err(ps, filter);
	}
	create_filter_finish(ps);

	*filterp = filter;
	return err;
}
1810
/**
 * create_system_filter - create a filter for an event_subsystem
 * @system: event_subsystem to create a filter for
 * @filter_str: filter string
 * @filterp: out param for created filter (always updated on return)
 *
 * Identical to create_filter() except that it creates a subsystem filter
 * and always remembers @filter_str.
 */
static int create_system_filter(struct event_subsystem *system,
				char *filter_str, struct event_filter **filterp)
{
	struct event_filter *filter = NULL;
	struct filter_parse_state *ps = NULL;
	int err;

	err = create_filter_start(filter_str, true, &ps, &filter);
	if (!err) {
		err = replace_system_preds(system, ps, filter_str);
		if (!err) {
			/* System filters just show a default message */
			kfree(filter->filter_string);
			filter->filter_string = NULL;
		} else {
			append_filter_err(ps, filter);
		}
	}
	create_filter_finish(ps);

	*filterp = filter;
	return err;
}
1843
/*
 * Set (or clear, when the string is "0") the filter on one event.
 * The old filter is RCU-swapped out and freed only after
 * synchronize_sched(), because filter_match_preds() runs locklessly.
 * Serialized by event_mutex.
 */
int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
{
	struct event_filter *filter;
	int err = 0;

	mutex_lock(&event_mutex);

	if (!strcmp(strstrip(filter_string), "0")) {
		/* "0" means: drop any existing filter */
		filter_disable(call);
		filter = call->filter;
		if (!filter)
			goto out_unlock;
		RCU_INIT_POINTER(call->filter, NULL);
		/* Make sure the filter is not being used */
		synchronize_sched();
		__free_filter(filter);
		goto out_unlock;
	}

	err = create_filter(call, filter_string, true, &filter);

	/*
	 * Always swap the call filter with the new filter
	 * even if there was an error. If there was an error
	 * in the filter, we disable the filter and show the error
	 * string
	 */
	if (filter) {
		struct event_filter *tmp = call->filter;

		if (!err)
			call->flags |= TRACE_EVENT_FL_FILTERED;
		else
			filter_disable(call);

		rcu_assign_pointer(call->filter, filter);

		if (tmp) {
			/* Make sure the call is done with the filter */
			synchronize_sched();
			__free_filter(tmp);
		}
	}
 out_unlock:
	mutex_unlock(&event_mutex);

	return err;
}
1892
/*
 * Set (or clear, when the string is "0") the filter on every event of a
 * subsystem directory.  Serialized by event_mutex; -ENODEV if the
 * subsystem no longer has events.
 */
int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
				 char *filter_string)
{
	struct event_subsystem *system = dir->subsystem;
	struct event_filter *filter;
	int err = 0;

	mutex_lock(&event_mutex);

	/* Make sure the system still has events */
	if (!dir->nr_events) {
		err = -ENODEV;
		goto out_unlock;
	}

	if (!strcmp(strstrip(filter_string), "0")) {
		filter_free_subsystem_preds(system);
		remove_filter_string(system->filter);
		filter = system->filter;
		system->filter = NULL;
		/* Ensure all filters are no longer used */
		synchronize_sched();
		filter_free_subsystem_filters(system);
		__free_filter(filter);
		goto out_unlock;
	}

	err = create_system_filter(system, filter_string, &filter);
	if (filter) {
		/*
		 * No event actually uses the system filter
		 * we can free it without synchronize_sched().
		 */
		__free_filter(system->filter);
		system->filter = filter;
	}
 out_unlock:
	mutex_unlock(&event_mutex);

	return err;
}
1934
1935 #ifdef CONFIG_PERF_EVENTS
1936
1937 void ftrace_profile_free_filter(struct perf_event *event)
1938 {
1939 struct event_filter *filter = event->filter;
1940
1941 event->filter = NULL;
1942 __free_filter(filter);
1943 }
1944
/* Walk state for translating an 'ip' event filter into ftrace filters. */
struct function_filter_data {
	struct ftrace_ops *ops;		/* ops whose filter/notrace lists are set */
	int first_filter;		/* reset filter list on first use only */
	int first_notrace;		/* reset notrace list on first use only */
};
1950
1951 #ifdef CONFIG_FUNCTION_TRACER
1952 static char **
1953 ftrace_function_filter_re(char *buf, int len, int *count)
1954 {
1955 char *str, *sep, **re;
1956
1957 str = kstrndup(buf, len, GFP_KERNEL);
1958 if (!str)
1959 return NULL;
1960
1961 /*
1962 * The argv_split function takes white space
1963 * as a separator, so convert ',' into spaces.
1964 */
1965 while ((sep = strchr(str, ',')))
1966 *sep = ' ';
1967
1968 re = argv_split(GFP_KERNEL, str, count);
1969 kfree(str);
1970 return re;
1971 }
1972
/* Route one pattern to the filter list (positive) or notrace list. */
static int ftrace_function_set_regexp(struct ftrace_ops *ops, int filter,
				      int reset, char *re, int len)
{
	return filter ? ftrace_set_filter(ops, re, len, reset)
		      : ftrace_set_notrace(ops, re, len, reset);
}
1985
/*
 * Apply one predicate's pattern buffer to the ftrace ops.  @filter
 * selects the positive (filter) or negative (notrace) list; the list is
 * reset only on the first pattern applied to it.
 */
static int __ftrace_function_set_filter(int filter, char *buf, int len,
					struct function_filter_data *data)
{
	int i, re_cnt, ret = -EINVAL;
	int *reset;
	char **re;

	reset = filter ? &data->first_filter : &data->first_notrace;

	/*
	 * The 'ip' field could have multiple filters set, separated
	 * either by space or comma. We first cut the filter and apply
	 * all pieces separately.
	 */
	re = ftrace_function_filter_re(buf, len, &re_cnt);
	if (!re)
		return -EINVAL;

	for (i = 0; i < re_cnt; i++) {
		ret = ftrace_function_set_regexp(data->ops, filter, *reset,
						 re[i], strlen(re[i]));
		if (ret)
			break;

		/* only the first pattern may reset the list */
		if (*reset)
			*reset = 0;
	}

	argv_free(re);
	return ret;
}
2017
2018 static int ftrace_function_check_pred(struct filter_pred *pred, int leaf)
2019 {
2020 struct ftrace_event_field *field = pred->field;
2021
2022 if (leaf) {
2023 /*
2024 * Check the leaf predicate for function trace, verify:
2025 * - only '==' and '!=' is used
2026 * - the 'ip' field is used
2027 */
2028 if ((pred->op != OP_EQ) && (pred->op != OP_NE))
2029 return -EINVAL;
2030
2031 if (strcmp(field->name, "ip"))
2032 return -EINVAL;
2033 } else {
2034 /*
2035 * Check the non leaf predicate for function trace, verify:
2036 * - only '||' is used
2037 */
2038 if (pred->op != OP_OR)
2039 return -EINVAL;
2040 }
2041
2042 return 0;
2043 }
2044
/*
 * Walk callback: validate each node and, for leaves, push the 'ip'
 * pattern into the ftrace filter (==) or notrace (!=) list.
 */
static int ftrace_function_set_filter_cb(enum move_type move,
					 struct filter_pred *pred,
					 int *err, void *data)
{
	/* Checking the node is valid for function trace. */
	if ((move != MOVE_DOWN) ||
	    (pred->left != FILTER_PRED_INVALID)) {
		/* inner node (or revisit): only '||' is allowed */
		*err = ftrace_function_check_pred(pred, 0);
	} else {
		*err = ftrace_function_check_pred(pred, 1);
		if (*err)
			return WALK_PRED_ABORT;

		/* OP_EQ selects the filter list, OP_NE the notrace list */
		*err = __ftrace_function_set_filter(pred->op == OP_EQ,
						    pred->regex.pattern,
						    pred->regex.len,
						    data);
	}

	return (*err) ? WALK_PRED_ABORT : WALK_PRED_DEFAULT;
}
2066
/* Translate an event filter on the 'ip' field into ftrace filter/notrace. */
static int ftrace_function_set_filter(struct perf_event *event,
				      struct event_filter *filter)
{
	struct function_filter_data data = {
		.first_filter  = 1,
		.first_notrace = 1,
		.ops           = &event->ftrace_ops,
	};

	return walk_pred_tree(filter->preds, filter->root,
			      ftrace_function_set_filter_cb, &data);
}
2079 #else
static int ftrace_function_set_filter(struct perf_event *event,
				      struct event_filter *filter)
{
	/* Without the function tracer, 'ip' filtering cannot be supported. */
	return -ENODEV;
}
2085 #endif /* CONFIG_FUNCTION_TRACER */
2086
/*
 * Attach @filter_str to a perf event.  Function events translate the
 * filter into ftrace filter/notrace lists and do not keep the
 * event_filter itself; other events store it on event->filter.
 * -EEXIST if a filter is already set; serialized by event_mutex.
 */
int ftrace_profile_set_filter(struct perf_event *event, int event_id,
			      char *filter_str)
{
	int err;
	struct event_filter *filter;
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);

	call = event->tp_event;

	err = -EINVAL;
	if (!call)
		goto out_unlock;

	err = -EEXIST;
	if (event->filter)
		goto out_unlock;

	err = create_filter(call, filter_str, false, &filter);
	if (err)
		goto free_filter;

	if (ftrace_event_is_function(call))
		err = ftrace_function_set_filter(event, filter);
	else
		event->filter = filter;

 free_filter:
	/* function events consumed the filter; others keep it on success */
	if (err || ftrace_event_is_function(call))
		__free_filter(filter);

 out_unlock:
	mutex_unlock(&event_mutex);

	return err;
}
2124
2125 #endif /* CONFIG_PERF_EVENTS */
2126
2127 #ifdef CONFIG_FTRACE_STARTUP_TEST
2128
2129 #include <linux/types.h>
2130 #include <linux/tracepoint.h>
2131
2132 #define CREATE_TRACE_POINTS
2133 #include "trace_events_filter_test.h"
2134
/*
 * DATA_REC() builds one self-test entry: the filter string under test
 * (FILTER is redefined before each group below), a sample record, the
 * expected match result, and the fields that must NOT be visited when
 * the folded tree short-circuits correctly.
 */
#define DATA_REC(m, va, vb, vc, vd, ve, vf, vg, vh, nvisit) \
{ \
	.filter = FILTER, \
	.rec    = { .a = va, .b = vb, .c = vc, .d = vd, \
		    .e = ve, .f = vf, .g = vg, .h = vh }, \
	.match  = m, \
	.not_visited = nvisit, \
}
#define YES 1
#define NO  0

static struct test_filter_data_t {
	char *filter;
	struct ftrace_raw_ftrace_test_filter rec;
	int match;
	char *not_visited;
} test_filter_data[] = {
#define FILTER "a == 1 && b == 1 && c == 1 && d == 1 && " \
	       "e == 1 && f == 1 && g == 1 && h == 1"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, ""),
	DATA_REC(NO,  0, 1, 1, 1, 1, 1, 1, 1, "bcdefgh"),
	DATA_REC(NO,  1, 1, 1, 1, 1, 1, 1, 0, ""),
#undef FILTER
#define FILTER "a == 1 || b == 1 || c == 1 || d == 1 || " \
	       "e == 1 || f == 1 || g == 1 || h == 1"
	DATA_REC(NO,  0, 0, 0, 0, 0, 0, 0, 0, ""),
	DATA_REC(YES, 0, 0, 0, 0, 0, 0, 0, 1, ""),
	DATA_REC(YES, 1, 0, 0, 0, 0, 0, 0, 0, "bcdefgh"),
#undef FILTER
#define FILTER "(a == 1 || b == 1) && (c == 1 || d == 1) && " \
	       "(e == 1 || f == 1) && (g == 1 || h == 1)"
	DATA_REC(NO,  0, 0, 1, 1, 1, 1, 1, 1, "dfh"),
	DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(YES, 1, 0, 1, 0, 0, 1, 0, 1, "bd"),
	DATA_REC(NO,  1, 0, 1, 0, 0, 1, 0, 0, "bd"),
#undef FILTER
#define FILTER "(a == 1 && b == 1) || (c == 1 && d == 1) || " \
	       "(e == 1 && f == 1) || (g == 1 && h == 1)"
	DATA_REC(YES, 1, 0, 1, 1, 1, 1, 1, 1, "efgh"),
	DATA_REC(YES, 0, 0, 0, 0, 0, 0, 1, 1, ""),
	DATA_REC(NO,  0, 0, 0, 0, 0, 0, 0, 1, ""),
#undef FILTER
#define FILTER "(a == 1 && b == 1) && (c == 1 && d == 1) && " \
	       "(e == 1 && f == 1) || (g == 1 && h == 1)"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 0, "gh"),
	DATA_REC(NO,  0, 0, 0, 0, 0, 0, 0, 1, ""),
	DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, ""),
#undef FILTER
#define FILTER "((a == 1 || b == 1) || (c == 1 || d == 1) || " \
	       "(e == 1 || f == 1)) && (g == 1 || h == 1)"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 1, "bcdef"),
	DATA_REC(NO,  0, 0, 0, 0, 0, 0, 0, 0, ""),
	DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, "h"),
#undef FILTER
#define FILTER "((((((((a == 1) && (b == 1)) || (c == 1)) && (d == 1)) || " \
	       "(e == 1)) && (f == 1)) || (g == 1)) && (h == 1))"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "ceg"),
	DATA_REC(NO,  0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(NO,  1, 0, 1, 0, 1, 0, 1, 0, ""),
#undef FILTER
#define FILTER "((((((((a == 1) || (b == 1)) && (c == 1)) || (d == 1)) && " \
	       "(e == 1)) || (f == 1)) && (g == 1)) || (h == 1))"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "bdfh"),
	DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(YES, 1, 0, 1, 0, 1, 0, 1, 0, "bdfh"),
};

#undef DATA_REC
#undef FILTER
#undef YES
#undef NO

/* Number of self-test table entries. */
#define DATA_CNT (sizeof(test_filter_data)/sizeof(struct test_filter_data_t))
2208
/* Set whenever a predicate that should have been skipped is evaluated. */
static int test_pred_visited;

/* Replacement pred->fn: record the visit and report the offending field. */
static int test_pred_visited_fn(struct filter_pred *pred, void *event)
{
	struct ftrace_event_field *field = pred->field;

	test_pred_visited = 1;
	printk(KERN_INFO "\npred visited %s\n", field->name);
	return 1;
}
2219
2220 static int test_walk_pred_cb(enum move_type move, struct filter_pred *pred,
2221 int *err, void *data)
2222 {
2223 char *fields = data;
2224
2225 if ((move == MOVE_DOWN) &&
2226 (pred->left == FILTER_PRED_INVALID)) {
2227 struct ftrace_event_field *field = pred->field;
2228
2229 if (!field) {
2230 WARN(1, "all leafs should have field defined");
2231 return WALK_PRED_DEFAULT;
2232 }
2233 if (!strchr(fields, *field->name))
2234 return WALK_PRED_DEFAULT;
2235
2236 WARN_ON(!pred->fn);
2237 pred->fn = test_pred_visited_fn;
2238 }
2239 return WALK_PRED_DEFAULT;
2240 }
2241
/*
 * Boot-time self-test: for every table entry, build the filter, hook
 * the fields that must not be visited, run the match, and check both
 * the match result and that no hooked predicate was evaluated.
 */
static __init int ftrace_test_event_filter(void)
{
	int i;

	printk(KERN_INFO "Testing ftrace filter: ");

	for (i = 0; i < DATA_CNT; i++) {
		struct event_filter *filter = NULL;
		struct test_filter_data_t *d = &test_filter_data[i];
		int err;

		err = create_filter(&event_ftrace_test_filter, d->filter,
				    false, &filter);
		if (err) {
			printk(KERN_INFO
			       "Failed to get filter for '%s', err %d\n",
			       d->filter, err);
			__free_filter(filter);
			break;
		}

		/*
		 * The preemption disabling is not really needed for self
		 * tests, but the rcu dereference will complain without it.
		 */
		preempt_disable();
		if (*d->not_visited)
			walk_pred_tree(filter->preds, filter->root,
				       test_walk_pred_cb,
				       d->not_visited);

		test_pred_visited = 0;
		err = filter_match_preds(filter, &d->rec);
		preempt_enable();

		__free_filter(filter);

		if (test_pred_visited) {
			printk(KERN_INFO
			       "Failed, unwanted pred visited for filter %s\n",
			       d->filter);
			break;
		}

		if (err != d->match) {
			printk(KERN_INFO
			       "Failed to match filter '%s', expected %d\n",
			       d->filter, d->match);
			break;
		}
	}

	/* only reached DATA_CNT if no entry failed */
	if (i == DATA_CNT)
		printk(KERN_CONT "OK\n");

	return 0;
}
2299
2300 late_initcall(ftrace_test_event_filter);
2301
2302 #endif /* CONFIG_FTRACE_STARTUP_TEST */
/* (web-viewer footer, not part of the source:) This page took 0.080826 seconds and 5 git commands to generate. */