Merge branch 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm
[deliverable/linux.git] / kernel / trace / trace_events_filter.c
1 /*
2 * trace_events_filter - generic event filtering
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
19 */
20
21 #include <linux/module.h>
22 #include <linux/ctype.h>
23 #include <linux/mutex.h>
24 #include <linux/perf_event.h>
25 #include <linux/slab.h>
26
27 #include "trace.h"
28 #include "trace_output.h"
29
/* Shown when reading a subsystem-level "filter" file with no filter set. */
#define DEFAULT_SYS_FILTER_MESSAGE					\
	"### global filter ###\n"					\
	"# Use this to set filters for multiple events.\n"		\
	"# Only events with the given fields will be affected.\n"	\
	"# If no events are modified, an error message will be displayed here"
35
/* Operator ids; each indexes the matching entry in filter_ops[] below. */
enum filter_op_ids
{
	OP_OR,
	OP_AND,
	OP_GLOB,		/* '~' glob/pattern match */
	OP_NE,
	OP_EQ,
	OP_LT,
	OP_LE,
	OP_GT,
	OP_GE,
	OP_BAND,		/* '&' bitwise-AND test */
	OP_NOT,
	OP_NONE,		/* sentinel: "not an operator" */
	OP_OPEN_PAREN,
};
52
/* One filter operator: its id, textual spelling and binding strength. */
struct filter_op {
	int id;			/* enum filter_op_ids value */
	char *string;		/* how the operator is written in a filter */
	int precedence;		/* higher binds tighter */
};
58
/* Order must be the same as enum filter_op_ids above */
/* The OP_NONE entry's "OP_NONE" string also terminates table scans. */
static struct filter_op filter_ops[] = {
	{ OP_OR,	"||",		1 },
	{ OP_AND,	"&&",		2 },
	{ OP_GLOB,	"~",		4 },
	{ OP_NE,	"!=",		4 },
	{ OP_EQ,	"==",		4 },
	{ OP_LT,	"<",		5 },
	{ OP_LE,	"<=",		5 },
	{ OP_GT,	">",		5 },
	{ OP_GE,	">=",		5 },
	{ OP_BAND,	"&",		6 },
	{ OP_NOT,	"!",		6 },
	{ OP_NONE,	"OP_NONE",	0 },
	{ OP_OPEN_PAREN, "(",		0 },
};
75
/* Parse error codes; each indexes the matching string in err_text[]. */
enum {
	FILT_ERR_NONE,
	FILT_ERR_INVALID_OP,
	FILT_ERR_UNBALANCED_PAREN,
	FILT_ERR_TOO_MANY_OPERANDS,
	FILT_ERR_OPERAND_TOO_LONG,
	FILT_ERR_FIELD_NOT_FOUND,
	FILT_ERR_ILLEGAL_FIELD_OP,
	FILT_ERR_ILLEGAL_INTVAL,
	FILT_ERR_BAD_SUBSYS_FILTER,
	FILT_ERR_TOO_MANY_PREDS,
	FILT_ERR_MISSING_FIELD,
	FILT_ERR_INVALID_FILTER,
	FILT_ERR_IP_FIELD_ONLY,
	FILT_ERR_ILLEGAL_NOT_OP,
};
92
/* Human-readable messages, indexed by the FILT_ERR_* codes above. */
static char *err_text[] = {
	"No error",
	"Invalid operator",
	"Unbalanced parens",
	"Too many operands",
	"Operand too long",
	"Field not found",
	"Illegal operation for field type",
	"Illegal integer value",
	"Couldn't find or set field in one of a subsystem's events",
	"Too many terms in predicate expression",
	"Missing field name and/or value",
	"Meaningless filter expression",
	"Only 'ip' field is supported for function trace",
	"Illegal use of '!'",
};
109
/* Operator-stack node used by the shunting-yard parser. */
struct opstack_op {
	int op;				/* enum filter_op_ids */
	struct list_head list;
};
114
/* One element of the postfix (RPN) form of the filter expression. */
struct postfix_elt {
	int op;			/* OP_NONE when this element is an operand */
	char *operand;		/* kstrdup'd operand text, NULL for operators */
	struct list_head list;
};
120
/* Scratch state for parsing one infix filter string into postfix form. */
struct filter_parse_state {
	struct filter_op *ops;		/* operator table (filter_ops) */
	struct list_head opstack;	/* shunting-yard operator stack */
	struct list_head postfix;	/* output queue of postfix_elt */
	int lasterr;			/* FILT_ERR_* of the last failure */
	int lasterr_pos;		/* input position of the error */

	struct {
		char *string;		/* the infix input (not owned) */
		unsigned int cnt;	/* characters left to consume */
		unsigned int tail;	/* next read position */
	} infix;

	struct {
		char string[MAX_FILTER_STR_VAL];	/* operand accumulator */
		int pos;
		unsigned int tail;	/* next write position into string[] */
	} operand;
};
140
/* Fixed-size stack of predicate pointers used while linking the tree. */
struct pred_stack {
	struct filter_pred **preds;	/* array; top slot stays NULL (sentinel) */
	int index;			/* grows downward; 0 means full */
};
145
/* If not of not match is equal to not of not, then it is a match */
/*
 * Generate filter_pred_<type>() for ordered/bitwise comparisons on an
 * integer field of the given C type.  The final line applies the
 * predicate's negation flag to the raw match result.
 */
#define DEFINE_COMPARISON_PRED(type)					\
static int filter_pred_##type(struct filter_pred *pred, void *event)	\
{									\
	type *addr = (type *)(event + pred->offset);			\
	type val = (type)pred->val;					\
	int match = 0;							\
									\
	switch (pred->op) {						\
	case OP_LT:							\
		match = (*addr < val);					\
		break;							\
	case OP_LE:							\
		match = (*addr <= val);					\
		break;							\
	case OP_GT:							\
		match = (*addr > val);					\
		break;							\
	case OP_GE:							\
		match = (*addr >= val);					\
		break;							\
	case OP_BAND:							\
		match = (*addr & val);					\
		break;							\
	default:							\
		break;							\
	}								\
									\
	return !!match == !pred->not;					\
}
176
/*
 * Generate filter_pred_<size>() for (in)equality on a <size>-bit field;
 * the XOR with pred->not turns '==' into '!='.
 */
#define DEFINE_EQUALITY_PRED(size)					\
static int filter_pred_##size(struct filter_pred *pred, void *event)	\
{									\
	u##size *addr = (u##size *)(event + pred->offset);		\
	u##size val = (u##size)pred->val;				\
	int match;							\
									\
	match = (val == *addr) ^ pred->not;				\
									\
	return match;							\
}
188
/* Instantiate the comparison/equality predicates for every field width. */
DEFINE_COMPARISON_PRED(s64);
DEFINE_COMPARISON_PRED(u64);
DEFINE_COMPARISON_PRED(s32);
DEFINE_COMPARISON_PRED(u32);
DEFINE_COMPARISON_PRED(s16);
DEFINE_COMPARISON_PRED(u16);
DEFINE_COMPARISON_PRED(s8);
DEFINE_COMPARISON_PRED(u8);

DEFINE_EQUALITY_PRED(64);
DEFINE_EQUALITY_PRED(32);
DEFINE_EQUALITY_PRED(16);
DEFINE_EQUALITY_PRED(8);
202
203 /* Filter predicate for fixed sized arrays of characters */
204 static int filter_pred_string(struct filter_pred *pred, void *event)
205 {
206 char *addr = (char *)(event + pred->offset);
207 int cmp, match;
208
209 cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len);
210
211 match = cmp ^ pred->not;
212
213 return match;
214 }
215
216 /* Filter predicate for char * pointers */
217 static int filter_pred_pchar(struct filter_pred *pred, void *event)
218 {
219 char **addr = (char **)(event + pred->offset);
220 int cmp, match;
221 int len = strlen(*addr) + 1; /* including tailing '\0' */
222
223 cmp = pred->regex.match(*addr, &pred->regex, len);
224
225 match = cmp ^ pred->not;
226
227 return match;
228 }
229
/*
 * Filter predicate for dynamic sized arrays of characters.
 * These are implemented through a list of strings at the end
 * of the entry.
 * Also each of these strings have a field in the entry which
 * contains its offset from the beginning of the entry.
 * We have then first to get this field, dereference it
 * and add it to the address of the entry, and at last we have
 * the address of the string.
 */
static int filter_pred_strloc(struct filter_pred *pred, void *event)
{
	u32 str_item = *(u32 *)(event + pred->offset);
	int str_loc = str_item & 0xffff;	/* low 16 bits: string offset */
	int str_len = str_item >> 16;		/* high 16 bits: string length */
	char *addr = (char *)(event + str_loc);
	int cmp, match;

	cmp = pred->regex.match(addr, &pred->regex, str_len);

	/* XOR with pred->not inverts the result for a negated predicate */
	match = cmp ^ pred->not;

	return match;
}
254
/* Placeholder callback for unused predicate slots: never matches. */
static int filter_pred_none(struct filter_pred *pred, void *event)
{
	return 0;
}
259
260 /*
261 * regex_match_foo - Basic regex callbacks
262 *
263 * @str: the string to be searched
264 * @r: the regex structure containing the pattern string
265 * @len: the length of the string to be searched (including '\0')
266 *
267 * Note:
268 * - @str might not be NULL-terminated if it's of type DYN_STRING
269 * or STATIC_STRING
270 */
271
272 static int regex_match_full(char *str, struct regex *r, int len)
273 {
274 if (strncmp(str, r->pattern, len) == 0)
275 return 1;
276 return 0;
277 }
278
279 static int regex_match_front(char *str, struct regex *r, int len)
280 {
281 if (strncmp(str, r->pattern, r->len) == 0)
282 return 1;
283 return 0;
284 }
285
286 static int regex_match_middle(char *str, struct regex *r, int len)
287 {
288 if (strnstr(str, r->pattern, len))
289 return 1;
290 return 0;
291 }
292
293 static int regex_match_end(char *str, struct regex *r, int len)
294 {
295 int strlen = len - 1;
296
297 if (strlen >= r->len &&
298 memcmp(str + strlen - r->len, r->pattern, r->len) == 0)
299 return 1;
300 return 0;
301 }
302
/**
 * filter_parse_regex - parse a basic regex
 * @buff: the raw regex
 * @len: length of the regex
 * @search: will point to the beginning of the string to compare
 * @not: tell whether the match will have to be inverted
 *
 * This passes in a buffer containing a regex and this function will
 * set search to point to the search part of the buffer and
 * return the type of search it is (see enum above).
 * This does modify buff.
 *
 * Returns enum type.
 * search returns the pointer to use for comparison.
 * not returns 1 if buff started with a '!'
 * 0 otherwise.
 */
enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
{
	int type = MATCH_FULL;
	int i;

	/* A leading '!' inverts the match and is not part of the pattern */
	if (buff[0] == '!') {
		*not = 1;
		buff++;
		len--;
	} else
		*not = 0;

	*search = buff;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				/* "*pat": compare against the end of the string */
				*search = buff + 1;
				type = MATCH_END_ONLY;
			} else {
				/* second '*' after a leading one: "*pat*" */
				if (type == MATCH_END_ONLY)
					type = MATCH_MIDDLE_ONLY;
				else
					type = MATCH_FRONT_ONLY;
				/* cut the pattern at the trailing '*' */
				buff[i] = 0;
				break;
			}
		}
	}

	return type;
}
352
/*
 * Choose the regex match callback for a predicate.  Only the glob
 * operator ('~') parses the pattern for '*' wildcards; every other
 * string op keeps a full comparison.
 */
static void filter_build_regex(struct filter_pred *pred)
{
	struct regex *r = &pred->regex;
	char *search;
	enum regex_type type = MATCH_FULL;
	int not = 0;

	if (pred->op == OP_GLOB) {
		type = filter_parse_regex(r->pattern, r->len, &search, &not);
		/* slide the stripped pattern (past '!'/'*') to the front */
		r->len = strlen(search);
		memmove(r->pattern, search, r->len+1);
	}

	switch (type) {
	case MATCH_FULL:
		r->match = regex_match_full;
		break;
	case MATCH_FRONT_ONLY:
		r->match = regex_match_front;
		break;
	case MATCH_MIDDLE_ONLY:
		r->match = regex_match_middle;
		break;
	case MATCH_END_ONLY:
		r->match = regex_match_end;
		break;
	}

	/* a "!pattern" glob folds into the predicate's own negation */
	pred->not ^= not;
}
383
/* Direction of travel during an iterative predicate-tree walk. */
enum move_type {
	MOVE_DOWN,		/* descending into a child */
	MOVE_UP_FROM_LEFT,	/* returning from the left subtree */
	MOVE_UP_FROM_RIGHT	/* returning from the right subtree */
};
389
/*
 * Step from @pred up to its parent in the flat predicate array and
 * record (via @move) which child we came up from.  The low bits of
 * pred->parent hold the parent's index; FILTER_PRED_IS_RIGHT flags
 * that this node is the parent's right child.
 * NOTE(review): @index is unused here; callers pass pred->parent anyway.
 */
static struct filter_pred *
get_pred_parent(struct filter_pred *pred, struct filter_pred *preds,
		int index, enum move_type *move)
{
	if (pred->parent & FILTER_PRED_IS_RIGHT)
		*move = MOVE_UP_FROM_RIGHT;
	else
		*move = MOVE_UP_FROM_LEFT;
	pred = &preds[pred->parent & ~FILTER_PRED_IS_RIGHT];

	return pred;
}
402
/* Callback verdicts for walk_pred_tree(). */
enum walk_return {
	WALK_PRED_ABORT,	/* stop the walk, return the callback's *err */
	WALK_PRED_PARENT,	/* skip this subtree, go back to the parent */
	WALK_PRED_DEFAULT,	/* keep walking normally */
};

/* Visit callback, invoked once per node for each move direction. */
typedef int (*filter_pred_walkcb_t) (enum move_type move,
				     struct filter_pred *pred,
				     int *err, void *data);
412
/*
 * Iteratively walk the binary predicate tree rooted at @root, calling
 * @cb at every visit (down, up-from-left, up-from-right).  The callback
 * may abort the walk (WALK_PRED_ABORT: *err is returned) or skip a
 * subtree (WALK_PRED_PARENT).  No recursion: stack usage is constant
 * regardless of tree depth.
 */
static int walk_pred_tree(struct filter_pred *preds,
			  struct filter_pred *root,
			  filter_pred_walkcb_t cb, void *data)
{
	struct filter_pred *pred = root;
	enum move_type move = MOVE_DOWN;
	int done = 0;

	if (!preds)
		return -EINVAL;

	do {
		int err = 0, ret;

		ret = cb(move, pred, &err, data);
		if (ret == WALK_PRED_ABORT)
			return err;
		if (ret == WALK_PRED_PARENT)
			goto get_parent;

		switch (move) {
		case MOVE_DOWN:
			/* descend into the left child unless this is a leaf */
			if (pred->left != FILTER_PRED_INVALID) {
				pred = &preds[pred->left];
				continue;
			}
			goto get_parent;
		case MOVE_UP_FROM_LEFT:
			/* left subtree done; descend into the right child */
			pred = &preds[pred->right];
			move = MOVE_DOWN;
			continue;
		case MOVE_UP_FROM_RIGHT:
 get_parent:
			/* reaching the root again means the walk is complete */
			if (pred == root)
				break;
			pred = get_pred_parent(pred, preds,
					       pred->parent,
					       &move);
			continue;
		}
		done = 1;
	} while (!done);

	/* We are fine. */
	return 0;
}
459
/*
 * A series of AND or ORs where found together. Instead of
 * climbing up and down the tree branches, an array of the
 * ops were made in order of checks. We can just move across
 * the array and short circuit if needed.
 */
static int process_ops(struct filter_pred *preds,
		       struct filter_pred *op, void *rec)
{
	struct filter_pred *pred;
	int match = 0;
	int type;
	int i;

	/*
	 * Micro-optimization: We set type to true if op
	 * is an OR and false otherwise (AND). Then we
	 * just need to test if the match is equal to
	 * the type, and if it is, we can short circuit the
	 * rest of the checks:
	 *
	 * if ((match && op->op == OP_OR) ||
	 *     (!match && op->op == OP_AND))
	 *	return match;
	 */
	type = op->op == OP_OR;

	/* op->val holds the number of child indexes in op->ops[] */
	for (i = 0; i < op->val; i++) {
		pred = &preds[op->ops[i]];
		if (!WARN_ON_ONCE(!pred->fn))
			match = pred->fn(pred, rec);
		if (!!match == type)
			break;
	}
	/* If not of not match is equal to not of not, then it is a match */
	return !!match == !op->not;
}
497
/* Shared state threaded through filter_match_preds_cb(). */
struct filter_match_preds_data {
	struct filter_pred *preds;	/* flat predicate array */
	int match;			/* running match result */
	void *rec;			/* the event record being tested */
};
503
/*
 * walk_pred_tree() callback that evaluates the filter against d->rec,
 * treating folded AND/OR runs as leaves and short-circuiting subtrees
 * whose result is already decided.
 */
static int filter_match_preds_cb(enum move_type move, struct filter_pred *pred,
				 int *err, void *data)
{
	struct filter_match_preds_data *d = data;

	*err = 0;
	switch (move) {
	case MOVE_DOWN:
		/* only AND and OR have children */
		if (pred->left != FILTER_PRED_INVALID) {
			/* If ops is set, then it was folded. */
			if (!pred->ops)
				return WALK_PRED_DEFAULT;
			/* We can treat folded ops as a leaf node */
			d->match = process_ops(d->preds, pred, d->rec);
		} else {
			if (!WARN_ON_ONCE(!pred->fn))
				d->match = pred->fn(pred, d->rec);
		}

		return WALK_PRED_PARENT;
	case MOVE_UP_FROM_LEFT:
		/*
		 * Check for short circuits.
		 *
		 * Optimization: !!match == (pred->op == OP_OR)
		 *   is the same as:
		 * if ((match && pred->op == OP_OR) ||
		 *     (!match && pred->op == OP_AND))
		 */
		if (!!d->match == (pred->op == OP_OR))
			return WALK_PRED_PARENT;
		break;
	case MOVE_UP_FROM_RIGHT:
		break;
	}

	return WALK_PRED_DEFAULT;
}
543
/* return 1 if event matches, 0 otherwise (discard) */
int filter_match_preds(struct event_filter *filter, void *rec)
{
	struct filter_pred *preds;
	struct filter_pred *root;
	struct filter_match_preds_data data = {
		/* match is currently meaningless */
		.match = -1,
		.rec = rec,
	};
	int n_preds, ret;

	/* no filter is considered a match */
	if (!filter)
		return 1;

	n_preds = filter->n_preds;
	if (!n_preds)
		return 1;

	/*
	 * n_preds, root and filter->preds are protect with preemption disabled.
	 */
	root = rcu_dereference_sched(filter->root);
	if (!root)
		return 1;

	data.preds = preds = rcu_dereference_sched(filter->preds);
	ret = walk_pred_tree(preds, root, filter_match_preds_cb, &data);
	/* the walk cannot fail here: preds was checked non-NULL above */
	WARN_ON(ret);
	return data.match;
}
EXPORT_SYMBOL_GPL(filter_match_preds);
577
578 static void parse_error(struct filter_parse_state *ps, int err, int pos)
579 {
580 ps->lasterr = err;
581 ps->lasterr_pos = pos;
582 }
583
584 static void remove_filter_string(struct event_filter *filter)
585 {
586 if (!filter)
587 return;
588
589 kfree(filter->filter_string);
590 filter->filter_string = NULL;
591 }
592
/*
 * Replace the filter's saved string with a copy of @filter_string.
 * On allocation failure the old string has already been freed and
 * -ENOMEM is returned.
 */
static int replace_filter_string(struct event_filter *filter,
				 char *filter_string)
{
	kfree(filter->filter_string);
	filter->filter_string = kstrdup(filter_string, GFP_KERNEL);
	if (!filter->filter_string)
		return -ENOMEM;

	return 0;
}
603
/*
 * Append @string to the filter's saved string, reallocating to fit.
 * The filter must already have a string (BUG_ON otherwise).
 */
static int append_filter_string(struct event_filter *filter,
				char *string)
{
	int newlen;
	char *new_filter_string;

	BUG_ON(!filter->filter_string);
	/* +1 for the terminating '\0' */
	newlen = strlen(filter->filter_string) + strlen(string) + 1;
	new_filter_string = kmalloc(newlen, GFP_KERNEL);
	if (!new_filter_string)
		return -ENOMEM;

	strcpy(new_filter_string, filter->filter_string);
	strcat(new_filter_string, string);
	kfree(filter->filter_string);
	filter->filter_string = new_filter_string;

	return 0;
}
623
/*
 * Append a human-readable parse error to the filter string: a line of
 * spaces with a '^' under the offending position, then the error text.
 * Best effort: allocation failure and append_filter_string() errors are
 * silently ignored.
 */
static void append_filter_err(struct filter_parse_state *ps,
			      struct event_filter *filter)
{
	int pos = ps->lasterr_pos;
	char *buf, *pbuf;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return;

	append_filter_string(filter, "\n");
	memset(buf, ' ', PAGE_SIZE);
	/*
	 * Keep the marker well inside the page; the 128 bytes of slack
	 * presumably leave room for the "\nparse_error: ...\n" tail
	 * written below — TODO confirm against the longest err_text[].
	 */
	if (pos > PAGE_SIZE - 128)
		pos = 0;
	buf[pos] = '^';
	pbuf = &buf[pos] + 1;

	sprintf(pbuf, "\nparse_error: %s\n", err_text[ps->lasterr]);
	append_filter_string(filter, buf);
	free_page((unsigned long) buf);
}
645
646 static inline struct event_filter *event_filter(struct ftrace_event_file *file)
647 {
648 if (file->event_call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
649 return file->event_call->filter;
650 else
651 return file->filter;
652 }
653
/* caller must hold event_mutex */
void print_event_filter(struct ftrace_event_file *file, struct trace_seq *s)
{
	struct event_filter *filter = event_filter(file);

	/* report "none" when no filter string is set */
	if (filter && filter->filter_string)
		trace_seq_printf(s, "%s\n", filter->filter_string);
	else
		trace_seq_puts(s, "none\n");
}
664
/* Print a subsystem-level filter; takes event_mutex itself. */
void print_subsystem_event_filter(struct event_subsystem *system,
				  struct trace_seq *s)
{
	struct event_filter *filter;

	mutex_lock(&event_mutex);
	filter = system->filter;
	if (filter && filter->filter_string)
		trace_seq_printf(s, "%s\n", filter->filter_string);
	else
		trace_seq_puts(s, DEFAULT_SYS_FILTER_MESSAGE "\n");
	mutex_unlock(&event_mutex);
}
678
/*
 * Allocate a downward-growing stack of @n_preds entries.  kcalloc of
 * n_preds + 1 leaves a zeroed (NULL) slot at the top that serves as
 * the empty sentinel for __pop_pred_stack().
 */
static int __alloc_pred_stack(struct pred_stack *stack, int n_preds)
{
	stack->preds = kcalloc(n_preds + 1, sizeof(*stack->preds), GFP_KERNEL);
	if (!stack->preds)
		return -ENOMEM;
	stack->index = n_preds;
	return 0;
}
687
/* Release the stack storage and mark the stack empty. */
static void __free_pred_stack(struct pred_stack *stack)
{
	kfree(stack->preds);
	stack->index = 0;
}
693
/* Push a predicate; index 0 means the preallocated stack is full. */
static int __push_pred_stack(struct pred_stack *stack,
			     struct filter_pred *pred)
{
	int index = stack->index;

	if (WARN_ON(index == 0))
		return -ENOSPC;

	stack->preds[--index] = pred;
	stack->index = index;
	return 0;
}
706
/*
 * Pop the top predicate, or return NULL when the zeroed sentinel slot
 * at the original top of the stack is reached (stack empty).
 */
static struct filter_pred *
__pop_pred_stack(struct pred_stack *stack)
{
	struct filter_pred *pred;
	int index = stack->index;

	pred = stack->preds[index++];
	if (!pred)
		return NULL;

	stack->index = index;
	return pred;
}
720
/*
 * Copy @src into slot @idx of the filter's predicate array and wire it
 * into the tree: AND/OR nodes pop their two children off @stack, leaf
 * nodes just mark themselves foldable.  The new node is pushed back so
 * a later op can adopt it.  FILTER_PRED_FOLD rides in the index field
 * while building and is masked off for the stored child/parent links.
 */
static int filter_set_pred(struct event_filter *filter,
			   int idx,
			   struct pred_stack *stack,
			   struct filter_pred *src)
{
	struct filter_pred *dest = &filter->preds[idx];
	struct filter_pred *left;
	struct filter_pred *right;

	*dest = *src;
	dest->index = idx;

	if (dest->op == OP_OR || dest->op == OP_AND) {
		right = __pop_pred_stack(stack);
		left = __pop_pred_stack(stack);
		if (!left || !right)
			return -EINVAL;
		/*
		 * If both children can be folded
		 * and they are the same op as this op or a leaf,
		 * then this op can be folded.
		 */
		if (left->index & FILTER_PRED_FOLD &&
		    ((left->op == dest->op && !left->not) ||
		     left->left == FILTER_PRED_INVALID) &&
		    right->index & FILTER_PRED_FOLD &&
		    ((right->op == dest->op && !right->not) ||
		     right->left == FILTER_PRED_INVALID))
			dest->index |= FILTER_PRED_FOLD;

		dest->left = left->index & ~FILTER_PRED_FOLD;
		dest->right = right->index & ~FILTER_PRED_FOLD;
		left->parent = dest->index & ~FILTER_PRED_FOLD;
		/* IS_RIGHT tells get_pred_parent() which child we were */
		right->parent = dest->index | FILTER_PRED_IS_RIGHT;
	} else {
		/*
		 * Make dest->left invalid to be used as a quick
		 * way to know this is a leaf node.
		 */
		dest->left = FILTER_PRED_INVALID;

		/* All leafs allow folding the parent ops. */
		dest->index |= FILTER_PRED_FOLD;
	}

	return __push_pred_stack(stack, dest);
}
768
/* Free the predicate array, including each op's folded-children list. */
static void __free_preds(struct event_filter *filter)
{
	int i;

	if (filter->preds) {
		for (i = 0; i < filter->n_preds; i++)
			kfree(filter->preds[i].ops);
		kfree(filter->preds);
		filter->preds = NULL;
	}
	filter->a_preds = 0;
	filter->n_preds = 0;
}
782
/* Clear the FILTERED flag at the call or the file level, as appropriate. */
static void filter_disable(struct ftrace_event_file *file)
{
	struct ftrace_event_call *call = file->event_call;

	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
		call->flags &= ~TRACE_EVENT_FL_FILTERED;
	else
		file->flags &= ~FTRACE_EVENT_FL_FILTERED;
}
792
/* Free a filter and everything it owns; NULL is allowed. */
static void __free_filter(struct event_filter *filter)
{
	if (!filter)
		return;

	__free_preds(filter);
	kfree(filter->filter_string);
	kfree(filter);
}
802
/* Public wrapper around __free_filter(). */
void free_event_filter(struct event_filter *filter)
{
	__free_filter(filter);
}
807
808 static struct event_filter *__alloc_filter(void)
809 {
810 struct event_filter *filter;
811
812 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
813 return filter;
814 }
815
/*
 * (Re)allocate room for @n_preds predicates, every slot initialized to
 * the never-matching filter_pred_none so a partially built filter is
 * still safe to evaluate.
 */
static int __alloc_preds(struct event_filter *filter, int n_preds)
{
	struct filter_pred *pred;
	int i;

	if (filter->preds)
		__free_preds(filter);

	filter->preds = kcalloc(n_preds, sizeof(*filter->preds), GFP_KERNEL);

	if (!filter->preds)
		return -ENOMEM;

	filter->a_preds = n_preds;	/* allocated capacity */
	filter->n_preds = 0;		/* none in use yet */

	for (i = 0; i < n_preds; i++) {
		pred = &filter->preds[i];
		pred->fn = filter_pred_none;
	}

	return 0;
}
839
/* Disable filtering for @file and drop its saved filter string. */
static inline void __remove_filter(struct ftrace_event_file *file)
{
	struct ftrace_event_call *call = file->event_call;

	filter_disable(file);
	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
		remove_filter_string(call->filter);
	else
		remove_filter_string(file->filter);
}
850
/* Disable and clear the filter of every event belonging to @dir. */
static void filter_free_subsystem_preds(struct ftrace_subsystem_dir *dir,
					struct trace_array *tr)
{
	struct ftrace_event_file *file;

	list_for_each_entry(file, &tr->events, list) {
		if (file->system != dir)
			continue;
		__remove_filter(file);
	}
}
862
/* Free the filter attached at the call or the file level for @file. */
static inline void __free_subsystem_filter(struct ftrace_event_file *file)
{
	struct ftrace_event_call *call = file->event_call;

	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) {
		__free_filter(call->filter);
		call->filter = NULL;
	} else {
		__free_filter(file->filter);
		file->filter = NULL;
	}
}
875
/* Free the filters of every event belonging to @dir. */
static void filter_free_subsystem_filters(struct ftrace_subsystem_dir *dir,
					  struct trace_array *tr)
{
	struct ftrace_event_file *file;

	list_for_each_entry(file, &tr->events, list) {
		if (file->system != dir)
			continue;
		__free_subsystem_filter(file);
	}
}
887
/*
 * Append @pred to the filter's predicate array via filter_set_pred().
 * Capacity was sized up front by count_preds, so hitting the limit here
 * is a logic error (hence the WARN_ON).
 */
static int filter_add_pred(struct filter_parse_state *ps,
			   struct event_filter *filter,
			   struct filter_pred *pred,
			   struct pred_stack *stack)
{
	int err;

	if (WARN_ON(filter->n_preds == filter->a_preds)) {
		parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
		return -ENOSPC;
	}

	err = filter_set_pred(filter, filter->n_preds, stack, pred);
	if (err)
		return err;

	filter->n_preds++;

	return 0;
}
908
909 int filter_assign_type(const char *type)
910 {
911 if (strstr(type, "__data_loc") && strstr(type, "char"))
912 return FILTER_DYN_STRING;
913
914 if (strchr(type, '[') && strstr(type, "char"))
915 return FILTER_STATIC_STRING;
916
917 return FILTER_OTHER;
918 }
919
/* True when the field is the special function-trace filter type. */
static bool is_function_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_TRACE_FN;
}
924
925 static bool is_string_field(struct ftrace_event_field *field)
926 {
927 return field->filter_type == FILTER_DYN_STRING ||
928 field->filter_type == FILTER_STATIC_STRING ||
929 field->filter_type == FILTER_PTR_STRING;
930 }
931
932 static int is_legal_op(struct ftrace_event_field *field, int op)
933 {
934 if (is_string_field(field) &&
935 (op != OP_EQ && op != OP_NE && op != OP_GLOB))
936 return 0;
937 if (!is_string_field(field) && op == OP_GLOB)
938 return 0;
939
940 return 1;
941 }
942
/*
 * Pick the integer-compare predicate for the field's size/signedness.
 * EQ/NE use the size-only equality predicates; ordered comparisons need
 * the signed or unsigned variant.  Returns NULL for unsupported sizes.
 */
static filter_pred_fn_t select_comparison_fn(int op, int field_size,
					     int field_is_signed)
{
	filter_pred_fn_t fn = NULL;

	switch (field_size) {
	case 8:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_64;
		else if (field_is_signed)
			fn = filter_pred_s64;
		else
			fn = filter_pred_u64;
		break;
	case 4:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_32;
		else if (field_is_signed)
			fn = filter_pred_s32;
		else
			fn = filter_pred_u32;
		break;
	case 2:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_16;
		else if (field_is_signed)
			fn = filter_pred_s16;
		else
			fn = filter_pred_u16;
		break;
	case 1:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_8;
		else if (field_is_signed)
			fn = filter_pred_s8;
		else
			fn = filter_pred_u8;
		break;
	}

	return fn;
}
985
/*
 * Finish setting up a leaf predicate for @field: validate the operator,
 * select the match function for string/function/integer fields, and
 * parse the comparison value for integer fields.  Returns -EINVAL (with
 * a parse error recorded in @ps) on any mismatch.
 */
static int init_pred(struct filter_parse_state *ps,
		     struct ftrace_event_field *field,
		     struct filter_pred *pred)

{
	filter_pred_fn_t fn = filter_pred_none;
	unsigned long long val;
	int ret;

	pred->offset = field->offset;

	if (!is_legal_op(field, pred->op)) {
		parse_error(ps, FILT_ERR_ILLEGAL_FIELD_OP, 0);
		return -EINVAL;
	}

	if (is_string_field(field)) {
		filter_build_regex(pred);

		if (field->filter_type == FILTER_STATIC_STRING) {
			fn = filter_pred_string;
			pred->regex.field_len = field->size;
		} else if (field->filter_type == FILTER_DYN_STRING)
			fn = filter_pred_strloc;
		else
			fn = filter_pred_pchar;
	} else if (is_function_field(field)) {
		/* function tracing can only filter on the "ip" field */
		if (strcmp(field->name, "ip")) {
			parse_error(ps, FILT_ERR_IP_FIELD_ONLY, 0);
			return -EINVAL;
		}
	} else {
		if (field->is_signed)
			ret = kstrtoll(pred->regex.pattern, 0, &val);
		else
			ret = kstrtoull(pred->regex.pattern, 0, &val);
		if (ret) {
			parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
			return -EINVAL;
		}
		pred->val = val;

		fn = select_comparison_fn(pred->op, field->size,
					  field->is_signed);
		if (!fn) {
			parse_error(ps, FILT_ERR_INVALID_OP, 0);
			return -EINVAL;
		}
	}

	/* '!=' is implemented as a negated '==' */
	if (pred->op == OP_NE)
		pred->not ^= 1;

	pred->fn = fn;
	return 0;
}
1042
/* Initialize parse state over @infix_string (referenced, not copied). */
static void parse_init(struct filter_parse_state *ps,
		       struct filter_op *ops,
		       char *infix_string)
{
	memset(ps, '\0', sizeof(*ps));

	ps->infix.string = infix_string;
	ps->infix.cnt = strlen(infix_string);
	ps->ops = ops;

	INIT_LIST_HEAD(&ps->opstack);
	INIT_LIST_HEAD(&ps->postfix);
}
1056
1057 static char infix_next(struct filter_parse_state *ps)
1058 {
1059 ps->infix.cnt--;
1060
1061 return ps->infix.string[ps->infix.tail++];
1062 }
1063
/* Return the next character without consuming it; '\0' at end of input. */
static char infix_peek(struct filter_parse_state *ps)
{
	if (ps->infix.tail == strlen(ps->infix.string))
		return 0;

	return ps->infix.string[ps->infix.tail];
}
1071
1072 static void infix_advance(struct filter_parse_state *ps)
1073 {
1074 ps->infix.cnt--;
1075 ps->infix.tail++;
1076 }
1077
/* Compare two op ids by table precedence (higher binds tighter). */
static inline int is_precedence_lower(struct filter_parse_state *ps,
				      int a, int b)
{
	return ps->ops[a].precedence < ps->ops[b].precedence;
}
1083
/*
 * Could @c start an operator?  Scans the op table (terminated by the
 * "OP_NONE" entry) comparing first characters only.
 */
static inline int is_op_char(struct filter_parse_state *ps, char c)
{
	int i;

	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
		if (ps->ops[i].string[0] == c)
			return 1;
	}

	return 0;
}
1095
/*
 * Resolve the operator starting with @firstc (already consumed).
 * Two-character operators are tried first — consuming the peeked
 * character on a hit — then single-character ones.  Returns the op id,
 * or OP_NONE if the text is not a known operator.
 */
static int infix_get_op(struct filter_parse_state *ps, char firstc)
{
	char nextc = infix_peek(ps);
	char opstr[3];
	int i;

	opstr[0] = firstc;
	opstr[1] = nextc;
	opstr[2] = '\0';

	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
		if (!strcmp(opstr, ps->ops[i].string)) {
			infix_advance(ps);
			return ps->ops[i].id;
		}
	}

	/* no two-char match: retry as a single-character operator */
	opstr[1] = '\0';

	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
		if (!strcmp(opstr, ps->ops[i].string))
			return ps->ops[i].id;
	}

	return OP_NONE;
}
1122
1123 static inline void clear_operand_string(struct filter_parse_state *ps)
1124 {
1125 memset(ps->operand.string, '\0', MAX_FILTER_STR_VAL);
1126 ps->operand.tail = 0;
1127 }
1128
/*
 * Add one character to the operand buffer; -EINVAL when it would
 * overflow (the last byte is reserved for the terminating '\0').
 */
static inline int append_operand_char(struct filter_parse_state *ps, char c)
{
	if (ps->operand.tail == MAX_FILTER_STR_VAL - 1)
		return -EINVAL;

	ps->operand.string[ps->operand.tail++] = c;

	return 0;
}
1138
/* Push @op onto the operator stack; -ENOMEM if node allocation fails. */
static int filter_opstack_push(struct filter_parse_state *ps, int op)
{
	struct opstack_op *opstack_op;

	opstack_op = kmalloc(sizeof(*opstack_op), GFP_KERNEL);
	if (!opstack_op)
		return -ENOMEM;

	opstack_op->op = op;
	/* list_add puts it at the head: the head is the top of the stack */
	list_add(&opstack_op->list, &ps->opstack);

	return 0;
}
1152
/* True when no operators are stacked. */
static int filter_opstack_empty(struct filter_parse_state *ps)
{
	return list_empty(&ps->opstack);
}
1157
/* Peek at the top operator without removing it; OP_NONE when empty. */
static int filter_opstack_top(struct filter_parse_state *ps)
{
	struct opstack_op *opstack_op;

	if (filter_opstack_empty(ps))
		return OP_NONE;

	opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);

	return opstack_op->op;
}
1169
/* Pop and return the top operator; OP_NONE when the stack is empty. */
static int filter_opstack_pop(struct filter_parse_state *ps)
{
	struct opstack_op *opstack_op;
	int op;

	if (filter_opstack_empty(ps))
		return OP_NONE;

	opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
	op = opstack_op->op;
	list_del(&opstack_op->list);

	kfree(opstack_op);

	return op;
}
1186
/* Discard any operators left on the stack (error/cleanup path). */
static void filter_opstack_clear(struct filter_parse_state *ps)
{
	while (!filter_opstack_empty(ps))
		filter_opstack_pop(ps);
}
1192
/* The operand text accumulated so far (kept NUL-terminated). */
static char *curr_operand(struct filter_parse_state *ps)
{
	return ps->operand.string;
}
1197
/* Append a copy of @operand to the postfix output queue. */
static int postfix_append_operand(struct filter_parse_state *ps, char *operand)
{
	struct postfix_elt *elt;

	elt = kmalloc(sizeof(*elt), GFP_KERNEL);
	if (!elt)
		return -ENOMEM;

	elt->op = OP_NONE;	/* OP_NONE marks the element as an operand */
	elt->operand = kstrdup(operand, GFP_KERNEL);
	if (!elt->operand) {
		kfree(elt);
		return -ENOMEM;
	}

	list_add_tail(&elt->list, &ps->postfix);

	return 0;
}
1217
/* Append an operator element to the postfix output queue. */
static int postfix_append_op(struct filter_parse_state *ps, int op)
{
	struct postfix_elt *elt;

	elt = kmalloc(sizeof(*elt), GFP_KERNEL);
	if (!elt)
		return -ENOMEM;

	elt->op = op;
	elt->operand = NULL;

	list_add_tail(&elt->list, &ps->postfix);

	return 0;
}
1233
1234 static void postfix_clear(struct filter_parse_state *ps)
1235 {
1236 struct postfix_elt *elt;
1237
1238 while (!list_empty(&ps->postfix)) {
1239 elt = list_first_entry(&ps->postfix, struct postfix_elt, list);
1240 list_del(&elt->list);
1241 kfree(elt->operand);
1242 kfree(elt);
1243 }
1244 }
1245
/*
 * Shunting-yard conversion of ps->infix into the postfix list.  Quoted
 * sections go verbatim into the current operand; an operator first
 * flushes the pending operand, then pops higher/equal-precedence ops to
 * the output before being pushed; parentheses are matched on the
 * operator stack.  Errors are recorded via parse_error() and -EINVAL
 * returned.
 * NOTE(review): -ENOMEM from the append/push helpers inside the loop is
 * not checked here — confirm callers tolerate a truncated postfix list.
 */
static int filter_parse(struct filter_parse_state *ps)
{
	int in_string = 0;
	int op, top_op;
	char ch;

	while ((ch = infix_next(ps))) {
		if (ch == '"') {
			in_string ^= 1;
			continue;
		}

		if (in_string)
			goto parse_operand;

		if (isspace(ch))
			continue;

		if (is_op_char(ps, ch)) {
			op = infix_get_op(ps, ch);
			if (op == OP_NONE) {
				parse_error(ps, FILT_ERR_INVALID_OP, 0);
				return -EINVAL;
			}

			/* the operator terminates any pending operand */
			if (strlen(curr_operand(ps))) {
				postfix_append_operand(ps, curr_operand(ps));
				clear_operand_string(ps);
			}

			/* pop ops that bind at least as tightly as this one */
			while (!filter_opstack_empty(ps)) {
				top_op = filter_opstack_top(ps);
				if (!is_precedence_lower(ps, top_op, op)) {
					top_op = filter_opstack_pop(ps);
					postfix_append_op(ps, top_op);
					continue;
				}
				break;
			}

			filter_opstack_push(ps, op);
			continue;
		}

		if (ch == '(') {
			filter_opstack_push(ps, OP_OPEN_PAREN);
			continue;
		}

		if (ch == ')') {
			if (strlen(curr_operand(ps))) {
				postfix_append_operand(ps, curr_operand(ps));
				clear_operand_string(ps);
			}

			/* drain ops back to the matching open paren */
			top_op = filter_opstack_pop(ps);
			while (top_op != OP_NONE) {
				if (top_op == OP_OPEN_PAREN)
					break;
				postfix_append_op(ps, top_op);
				top_op = filter_opstack_pop(ps);
			}
			if (top_op == OP_NONE) {
				parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
				return -EINVAL;
			}
			continue;
		}
parse_operand:
		if (append_operand_char(ps, ch)) {
			parse_error(ps, FILT_ERR_OPERAND_TOO_LONG, 0);
			return -EINVAL;
		}
	}

	/* flush the trailing operand and any remaining operators */
	if (strlen(curr_operand(ps)))
		postfix_append_operand(ps, curr_operand(ps));

	while (!filter_opstack_empty(ps)) {
		top_op = filter_opstack_pop(ps);
		if (top_op == OP_NONE)
			break;
		if (top_op == OP_OPEN_PAREN) {
			parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
			return -EINVAL;
		}
		postfix_append_op(ps, top_op);
	}

	return 0;
}
1337
/*
 * create_pred - build a template predicate for one postfix operator
 *
 * NOTE: returns a pointer to a function-local *static* predicate that is
 * overwritten on every call; callers must consume (copy) it before calling
 * again. Presumably safe because callers are serialized (event_mutex) —
 * verify against the callers before relying on this elsewhere.
 *
 * For the logical ops (&&, ||) only pred.op matters. For comparisons both
 * operands are required: operand1 names the event field, operand2 is the
 * value/pattern. Returns NULL on error after recording a parse error.
 */
static struct filter_pred *create_pred(struct filter_parse_state *ps,
				       struct ftrace_event_call *call,
				       int op, char *operand1, char *operand2)
{
	struct ftrace_event_field *field;
	static struct filter_pred pred;

	memset(&pred, 0, sizeof(pred));
	pred.op = op;

	if (op == OP_AND || op == OP_OR)
		return &pred;

	if (!operand1 || !operand2) {
		parse_error(ps, FILT_ERR_MISSING_FIELD, 0);
		return NULL;
	}

	field = trace_find_event_field(call, operand1);
	if (!field) {
		parse_error(ps, FILT_ERR_FIELD_NOT_FOUND, 0);
		return NULL;
	}

	/* operand length was bounded during parsing (FILT_ERR_OPERAND_TOO_LONG) */
	strcpy(pred.regex.pattern, operand2);
	pred.regex.len = strlen(pred.regex.pattern);
	pred.field = field;
	return init_pred(ps, field, &pred) ? NULL : &pred;
}
1367
/*
 * check_preds - validate that the postfix expression is well formed
 *
 * @cnt simulates evaluation-stack depth: each operand pushes one slot,
 * each binary operator (including && and ||) consumes two and produces
 * one (net -1), and unary OP_NOT is net zero. A valid expression ends
 * with exactly one result on the stack. Also reject filters with no
 * comparison predicates or with more logical ops than comparisons.
 */
static int check_preds(struct filter_parse_state *ps)
{
	int n_normal_preds = 0, n_logical_preds = 0;
	struct postfix_elt *elt;
	int cnt = 0;

	list_for_each_entry(elt, &ps->postfix, list) {
		if (elt->op == OP_NONE) {
			/* operand: pushes one value */
			cnt++;
			continue;
		}

		if (elt->op == OP_AND || elt->op == OP_OR) {
			n_logical_preds++;
			cnt--;
			continue;
		}
		if (elt->op != OP_NOT)
			cnt--;
		n_normal_preds++;
		/* cnt < 0 would mean an operator lacked operands */
		WARN_ON_ONCE(cnt < 0);
	}

	if (cnt != 1 || !n_normal_preds || n_logical_preds >= n_normal_preds) {
		parse_error(ps, FILT_ERR_INVALID_FILTER, 0);
		return -EINVAL;
	}

	return 0;
}
1398
1399 static int count_preds(struct filter_parse_state *ps)
1400 {
1401 struct postfix_elt *elt;
1402 int n_preds = 0;
1403
1404 list_for_each_entry(elt, &ps->postfix, list) {
1405 if (elt->op == OP_NONE)
1406 continue;
1407 n_preds++;
1408 }
1409
1410 return n_preds;
1411 }
1412
/* Bookkeeping for check_pred_tree_cb(): abort when count exceeds max. */
struct check_pred_data {
	int count;
	int max;
};
1417
1418 static int check_pred_tree_cb(enum move_type move, struct filter_pred *pred,
1419 int *err, void *data)
1420 {
1421 struct check_pred_data *d = data;
1422
1423 if (WARN_ON(d->count++ > d->max)) {
1424 *err = -EINVAL;
1425 return WALK_PRED_ABORT;
1426 }
1427 return WALK_PRED_DEFAULT;
1428 }
1429
/*
 * The tree is walked at filtering of an event. If the tree is not correctly
 * built, it may cause an infinite loop. Check here that the tree does
 * indeed terminate.
 *
 * Returns 0 when the walk terminates within the visit budget, -EINVAL
 * (via the callback) otherwise.
 */
static int check_pred_tree(struct event_filter *filter,
			   struct filter_pred *root)
{
	struct check_pred_data data = {
		/*
		 * The max that we can hit a node is three times.
		 * Once going down, once coming up from left, and
		 * once coming up from right. This is more than enough
		 * since leafs are only hit a single time.
		 */
		.max  = 3 * filter->n_preds,
		.count = 0,
	};

	return walk_pred_tree(filter->preds, root,
			      check_pred_tree_cb, &data);
}
1452
1453 static int count_leafs_cb(enum move_type move, struct filter_pred *pred,
1454 int *err, void *data)
1455 {
1456 int *count = data;
1457
1458 if ((move == MOVE_DOWN) &&
1459 (pred->left == FILTER_PRED_INVALID))
1460 (*count)++;
1461
1462 return WALK_PRED_DEFAULT;
1463 }
1464
1465 static int count_leafs(struct filter_pred *preds, struct filter_pred *root)
1466 {
1467 int count = 0, ret;
1468
1469 ret = walk_pred_tree(preds, root, count_leafs_cb, &count);
1470 WARN_ON(ret);
1471 return count;
1472 }
1473
/* State for fold_pred_cb(): fill root->ops[] with up to @children leaves. */
struct fold_pred_data {
	struct filter_pred *root;
	int count;
	int children;
};
1479
/*
 * walk_pred_tree() callback for fold_pred(): record the index of each
 * leaf under the folded root into root->ops[], clearing the leaf's own
 * fold flag. Aborts if more leaves appear than were counted up front.
 */
static int fold_pred_cb(enum move_type move, struct filter_pred *pred,
			int *err, void *data)
{
	struct fold_pred_data *d = data;
	struct filter_pred *root = d->root;

	/* Only process each node once, on the way down */
	if (move != MOVE_DOWN)
		return WALK_PRED_DEFAULT;
	/* Only leaves are gathered into the ops array */
	if (pred->left != FILTER_PRED_INVALID)
		return WALK_PRED_DEFAULT;

	if (WARN_ON(d->count == d->children)) {
		*err = -EINVAL;
		return WALK_PRED_ABORT;
	}

	pred->index &= ~FILTER_PRED_FOLD;
	root->ops[d->count++] = pred->index;
	return WALK_PRED_DEFAULT;
}
1500
/*
 * fold_pred - flatten the subtree below @root into root->ops[]
 *
 * After folding, root->ops holds the indexes of all leaf predicates and
 * root->val their count, letting the match path process them as a batch
 * instead of walking the subtree. Returns 0 or a negative errno.
 */
static int fold_pred(struct filter_pred *preds, struct filter_pred *root)
{
	struct fold_pred_data data = {
		.root  = root,
		.count = 0,
	};
	int children;

	/* No need to keep the fold flag */
	root->index &= ~FILTER_PRED_FOLD;

	/* If the root is a leaf then do nothing */
	if (root->left == FILTER_PRED_INVALID)
		return 0;

	/* count the children */
	children = count_leafs(preds, &preds[root->left]);
	children += count_leafs(preds, &preds[root->right]);

	root->ops = kcalloc(children, sizeof(*root->ops), GFP_KERNEL);
	if (!root->ops)
		return -ENOMEM;

	root->val = children;
	data.children = children;
	return walk_pred_tree(preds, root, fold_pred_cb, &data);
}
1528
/*
 * walk_pred_tree() callback: fold each subtree whose root carries the
 * FILTER_PRED_FOLD flag, then skip its interior by resuming at the parent.
 */
static int fold_pred_tree_cb(enum move_type move, struct filter_pred *pred,
			     int *err, void *data)
{
	struct filter_pred *preds = data;

	if (move != MOVE_DOWN)
		return WALK_PRED_DEFAULT;
	if (!(pred->index & FILTER_PRED_FOLD))
		return WALK_PRED_DEFAULT;

	*err = fold_pred(preds, pred);
	if (*err)
		return WALK_PRED_ABORT;

	/* everything below is folded, continue with parent */
	return WALK_PRED_PARENT;
}
1546
1547 /*
1548 * To optimize the processing of the ops, if we have several "ors" or
1549 * "ands" together, we can put them in an array and process them all
1550 * together speeding up the filter logic.
1551 */
1552 static int fold_pred_tree(struct event_filter *filter,
1553 struct filter_pred *root)
1554 {
1555 return walk_pred_tree(filter->preds, root, fold_pred_tree_cb,
1556 filter->preds);
1557 }
1558
1559 static int replace_preds(struct ftrace_event_call *call,
1560 struct event_filter *filter,
1561 struct filter_parse_state *ps,
1562 bool dry_run)
1563 {
1564 char *operand1 = NULL, *operand2 = NULL;
1565 struct filter_pred *pred;
1566 struct filter_pred *root;
1567 struct postfix_elt *elt;
1568 struct pred_stack stack = { }; /* init to NULL */
1569 int err;
1570 int n_preds = 0;
1571
1572 n_preds = count_preds(ps);
1573 if (n_preds >= MAX_FILTER_PRED) {
1574 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1575 return -ENOSPC;
1576 }
1577
1578 err = check_preds(ps);
1579 if (err)
1580 return err;
1581
1582 if (!dry_run) {
1583 err = __alloc_pred_stack(&stack, n_preds);
1584 if (err)
1585 return err;
1586 err = __alloc_preds(filter, n_preds);
1587 if (err)
1588 goto fail;
1589 }
1590
1591 n_preds = 0;
1592 list_for_each_entry(elt, &ps->postfix, list) {
1593 if (elt->op == OP_NONE) {
1594 if (!operand1)
1595 operand1 = elt->operand;
1596 else if (!operand2)
1597 operand2 = elt->operand;
1598 else {
1599 parse_error(ps, FILT_ERR_TOO_MANY_OPERANDS, 0);
1600 err = -EINVAL;
1601 goto fail;
1602 }
1603 continue;
1604 }
1605
1606 if (elt->op == OP_NOT) {
1607 if (!n_preds || operand1 || operand2) {
1608 parse_error(ps, FILT_ERR_ILLEGAL_NOT_OP, 0);
1609 err = -EINVAL;
1610 goto fail;
1611 }
1612 if (!dry_run)
1613 filter->preds[n_preds - 1].not ^= 1;
1614 continue;
1615 }
1616
1617 if (WARN_ON(n_preds++ == MAX_FILTER_PRED)) {
1618 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1619 err = -ENOSPC;
1620 goto fail;
1621 }
1622
1623 pred = create_pred(ps, call, elt->op, operand1, operand2);
1624 if (!pred) {
1625 err = -EINVAL;
1626 goto fail;
1627 }
1628
1629 if (!dry_run) {
1630 err = filter_add_pred(ps, filter, pred, &stack);
1631 if (err)
1632 goto fail;
1633 }
1634
1635 operand1 = operand2 = NULL;
1636 }
1637
1638 if (!dry_run) {
1639 /* We should have one item left on the stack */
1640 pred = __pop_pred_stack(&stack);
1641 if (!pred)
1642 return -EINVAL;
1643 /* This item is where we start from in matching */
1644 root = pred;
1645 /* Make sure the stack is empty */
1646 pred = __pop_pred_stack(&stack);
1647 if (WARN_ON(pred)) {
1648 err = -EINVAL;
1649 filter->root = NULL;
1650 goto fail;
1651 }
1652 err = check_pred_tree(filter, root);
1653 if (err)
1654 goto fail;
1655
1656 /* Optimize the tree */
1657 err = fold_pred_tree(filter, root);
1658 if (err)
1659 goto fail;
1660
1661 /* We don't set root until we know it works */
1662 barrier();
1663 filter->root = root;
1664 }
1665
1666 err = 0;
1667 fail:
1668 __free_pred_stack(&stack);
1669 return err;
1670 }
1671
1672 static inline void event_set_filtered_flag(struct ftrace_event_file *file)
1673 {
1674 struct ftrace_event_call *call = file->event_call;
1675
1676 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
1677 call->flags |= TRACE_EVENT_FL_FILTERED;
1678 else
1679 file->flags |= FTRACE_EVENT_FL_FILTERED;
1680 }
1681
1682 static inline void event_set_filter(struct ftrace_event_file *file,
1683 struct event_filter *filter)
1684 {
1685 struct ftrace_event_call *call = file->event_call;
1686
1687 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
1688 rcu_assign_pointer(call->filter, filter);
1689 else
1690 rcu_assign_pointer(file->filter, filter);
1691 }
1692
1693 static inline void event_clear_filter(struct ftrace_event_file *file)
1694 {
1695 struct ftrace_event_call *call = file->event_call;
1696
1697 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
1698 RCU_INIT_POINTER(call->filter, NULL);
1699 else
1700 RCU_INIT_POINTER(file->filter, NULL);
1701 }
1702
1703 static inline void
1704 event_set_no_set_filter_flag(struct ftrace_event_file *file)
1705 {
1706 struct ftrace_event_call *call = file->event_call;
1707
1708 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
1709 call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
1710 else
1711 file->flags |= FTRACE_EVENT_FL_NO_SET_FILTER;
1712 }
1713
1714 static inline void
1715 event_clear_no_set_filter_flag(struct ftrace_event_file *file)
1716 {
1717 struct ftrace_event_call *call = file->event_call;
1718
1719 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
1720 call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
1721 else
1722 file->flags &= ~FTRACE_EVENT_FL_NO_SET_FILTER;
1723 }
1724
1725 static inline bool
1726 event_no_set_filter_flag(struct ftrace_event_file *file)
1727 {
1728 struct ftrace_event_call *call = file->event_call;
1729
1730 if (file->flags & FTRACE_EVENT_FL_NO_SET_FILTER)
1731 return true;
1732
1733 if ((call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) &&
1734 (call->flags & TRACE_EVENT_FL_NO_SET_FILTER))
1735 return true;
1736
1737 return false;
1738 }
1739
/* Node holding one per-event filter while a system-wide filter is applied. */
struct filter_list {
	struct list_head list;
	struct event_filter *filter;
};
1744
/*
 * replace_system_preds - apply one filter string to every event in a subsystem
 *
 * Phase 1: dry-run the filter against each event in @dir to flag events it
 * cannot apply to. Phase 2: for each applicable event, build a fresh filter
 * and swap it in, collecting the displaced old filters on a local list so
 * they can all be freed after a single synchronize_sched().
 *
 * Returns 0 if at least one event took the filter, -EINVAL if none did,
 * -ENOMEM on allocation failure.
 */
static int replace_system_preds(struct ftrace_subsystem_dir *dir,
				struct trace_array *tr,
				struct filter_parse_state *ps,
				char *filter_string)
{
	struct ftrace_event_file *file;
	struct filter_list *filter_item;
	struct filter_list *tmp;
	LIST_HEAD(filter_list);
	bool fail = true;
	int err;

	list_for_each_entry(file, &tr->events, list) {
		if (file->system != dir)
			continue;

		/*
		 * Try to see if the filter can be applied
		 * (filter arg is ignored on dry_run)
		 */
		err = replace_preds(file->event_call, NULL, ps, true);
		if (err)
			event_set_no_set_filter_flag(file);
		else
			event_clear_no_set_filter_flag(file);
	}

	list_for_each_entry(file, &tr->events, list) {
		struct event_filter *filter;

		if (file->system != dir)
			continue;

		if (event_no_set_filter_flag(file))
			continue;

		filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
		if (!filter_item)
			goto fail_mem;

		list_add_tail(&filter_item->list, &filter_list);

		filter_item->filter = __alloc_filter();
		if (!filter_item->filter)
			goto fail_mem;
		filter = filter_item->filter;

		/* Can only fail on no memory */
		err = replace_filter_string(filter, filter_string);
		if (err)
			goto fail_mem;

		err = replace_preds(file->event_call, filter, ps, false);
		if (err) {
			filter_disable(file);
			parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
			append_filter_err(ps, filter);
		} else
			event_set_filtered_flag(file);
		/*
		 * Regardless of if this returned an error, we still
		 * replace the filter for the call.
		 */
		filter = event_filter(file);
		event_set_filter(file, filter_item->filter);
		/* the list node now owns the *old* filter for later freeing */
		filter_item->filter = filter;

		fail = false;
	}

	if (fail)
		goto fail;

	/*
	 * The calls can still be using the old filters.
	 * Do a synchronize_sched() to ensure all calls are
	 * done with them before we free them.
	 */
	synchronize_sched();
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		__free_filter(filter_item->filter);
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	return 0;
fail:
	/* No call succeeded */
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
	return -EINVAL;
fail_mem:
	/* If any call succeeded, we still need to sync */
	if (!fail)
		synchronize_sched();
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		__free_filter(filter_item->filter);
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	return -ENOMEM;
}
1849
/*
 * create_filter_start - allocate filter + parse state and parse @filter_str
 *
 * On success (and on parse error, once allocations succeeded) *@psp and
 * *@filterp are set and the caller owns both; call create_filter_finish()
 * on the parse state when done. All allocation failures return -ENOMEM
 * with nothing published.
 */
static int create_filter_start(char *filter_str, bool set_str,
			       struct filter_parse_state **psp,
			       struct event_filter **filterp)
{
	struct event_filter *filter;
	struct filter_parse_state *ps = NULL;
	int err = 0;

	WARN_ON_ONCE(*psp || *filterp);

	/* allocate everything, and if any fails, free all and fail */
	filter = __alloc_filter();
	if (filter && set_str)
		err = replace_filter_string(filter, filter_str);

	ps = kzalloc(sizeof(*ps), GFP_KERNEL);

	if (!filter || !ps || err) {
		kfree(ps);
		__free_filter(filter);
		return -ENOMEM;
	}

	/* we're committed to creating a new filter */
	*filterp = filter;
	*psp = ps;

	parse_init(ps, filter_ops, filter_str);
	err = filter_parse(ps);
	if (err && set_str)
		append_filter_err(ps, filter);
	return err;
}
1883
/* Tear down the parse state allocated by create_filter_start(). */
static void create_filter_finish(struct filter_parse_state *ps)
{
	if (!ps)
		return;

	filter_opstack_clear(ps);
	postfix_clear(ps);
	kfree(ps);
}
1892
/**
 * create_filter - create a filter for a ftrace_event_call
 * @call: ftrace_event_call to create a filter for
 * @filter_str: filter string
 * @set_str: remember @filter_str and enable detailed error in filter
 * @filterp: out param for created filter (always updated on return)
 *
 * Creates a filter for @call with @filter_str. If @set_str is %true,
 * @filter_str is copied and recorded in the new filter.
 *
 * On success, returns 0 and *@filterp points to the new filter. On
 * failure, returns -errno and *@filterp may point to %NULL or to a new
 * filter. In the latter case, the returned filter contains error
 * information if @set_str is %true and the caller is responsible for
 * freeing it.
 */
static int create_filter(struct ftrace_event_call *call,
			 char *filter_str, bool set_str,
			 struct event_filter **filterp)
{
	struct event_filter *filter = NULL;
	struct filter_parse_state *ps = NULL;
	int err;

	err = create_filter_start(filter_str, set_str, &ps, &filter);
	if (!err) {
		err = replace_preds(call, filter, ps, false);
		if (err && set_str)
			append_filter_err(ps, filter);
	}
	create_filter_finish(ps);

	/* always report the filter back, even on error (see kernel-doc) */
	*filterp = filter;
	return err;
}
1928
/* Exported wrapper around create_filter() for users outside this file. */
int create_event_filter(struct ftrace_event_call *call,
			char *filter_str, bool set_str,
			struct event_filter **filterp)
{
	return create_filter(call, filter_str, set_str, filterp);
}
1935
/**
 * create_system_filter - create a filter for an event_subsystem
 * @dir: descriptor of the subsystem to create a filter for
 * @tr: trace array whose events the subsystem filter applies to
 * @filter_str: filter string
 * @filterp: out param for created filter (always updated on return)
 *
 * Identical to create_filter() except that it creates a subsystem filter
 * and always remembers @filter_str.
 */
static int create_system_filter(struct ftrace_subsystem_dir *dir,
				struct trace_array *tr,
				char *filter_str, struct event_filter **filterp)
{
	struct event_filter *filter = NULL;
	struct filter_parse_state *ps = NULL;
	int err;

	err = create_filter_start(filter_str, true, &ps, &filter);
	if (!err) {
		err = replace_system_preds(dir, tr, ps, filter_str);
		if (!err) {
			/* System filters just show a default message */
			kfree(filter->filter_string);
			filter->filter_string = NULL;
		} else {
			append_filter_err(ps, filter);
		}
	}
	create_filter_finish(ps);

	*filterp = filter;
	return err;
}
1969
/*
 * apply_event_filter - set or clear the filter of one event file
 *
 * A @filter_string of "0" clears the current filter; anything else is
 * parsed and swapped in via RCU. Old filters are freed only after
 * synchronize_sched() guarantees no tracer still dereferences them.
 *
 * caller must hold event_mutex
 */
int apply_event_filter(struct ftrace_event_file *file, char *filter_string)
{
	struct ftrace_event_call *call = file->event_call;
	struct event_filter *filter;
	int err;

	if (!strcmp(strstrip(filter_string), "0")) {
		/* "0" means: disable and drop the existing filter */
		filter_disable(file);
		filter = event_filter(file);

		if (!filter)
			return 0;

		event_clear_filter(file);

		/* Make sure the filter is not being used */
		synchronize_sched();
		__free_filter(filter);

		return 0;
	}

	err = create_filter(call, filter_string, true, &filter);

	/*
	 * Always swap the call filter with the new filter
	 * even if there was an error. If there was an error
	 * in the filter, we disable the filter and show the error
	 * string
	 */
	if (filter) {
		struct event_filter *tmp;

		tmp = event_filter(file);
		if (!err)
			event_set_filtered_flag(file);
		else
			filter_disable(file);

		event_set_filter(file, filter);

		if (tmp) {
			/* Make sure the call is done with the filter */
			synchronize_sched();
			__free_filter(tmp);
		}
	}

	return err;
}
2021
/*
 * apply_subsystem_event_filter - set or clear a subsystem-wide filter
 *
 * A @filter_string of "0" removes the system filter and every per-event
 * filter it installed; anything else is parsed and pushed to each event
 * in the subsystem. Serialized by event_mutex.
 */
int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
				 char *filter_string)
{
	struct event_subsystem *system = dir->subsystem;
	struct trace_array *tr = dir->tr;
	struct event_filter *filter;
	int err = 0;

	mutex_lock(&event_mutex);

	/* Make sure the system still has events */
	if (!dir->nr_events) {
		err = -ENODEV;
		goto out_unlock;
	}

	if (!strcmp(strstrip(filter_string), "0")) {
		filter_free_subsystem_preds(dir, tr);
		remove_filter_string(system->filter);
		filter = system->filter;
		system->filter = NULL;
		/* Ensure all filters are no longer used */
		synchronize_sched();
		filter_free_subsystem_filters(dir, tr);
		__free_filter(filter);
		goto out_unlock;
	}

	err = create_system_filter(dir, tr, filter_string, &filter);
	if (filter) {
		/*
		 * No event actually uses the system filter
		 * we can free it without synchronize_sched().
		 */
		__free_filter(system->filter);
		system->filter = filter;
	}
out_unlock:
	mutex_unlock(&event_mutex);

	return err;
}
2064
2065 #ifdef CONFIG_PERF_EVENTS
2066
2067 void ftrace_profile_free_filter(struct perf_event *event)
2068 {
2069 struct event_filter *filter = event->filter;
2070
2071 event->filter = NULL;
2072 __free_filter(filter);
2073 }
2074
/*
 * State while translating a predicate tree into ftrace function filters.
 * first_filter/first_notrace request a reset on the first pattern of each
 * kind so stale patterns are dropped.
 */
struct function_filter_data {
	struct ftrace_ops *ops;
	int first_filter;
	int first_notrace;
};
2080
2081 #ifdef CONFIG_FUNCTION_TRACER
2082 static char **
2083 ftrace_function_filter_re(char *buf, int len, int *count)
2084 {
2085 char *str, **re;
2086
2087 str = kstrndup(buf, len, GFP_KERNEL);
2088 if (!str)
2089 return NULL;
2090
2091 /*
2092 * The argv_split function takes white space
2093 * as a separator, so convert ',' into spaces.
2094 */
2095 strreplace(str, ',', ' ');
2096
2097 re = argv_split(GFP_KERNEL, str, count);
2098 kfree(str);
2099 return re;
2100 }
2101
/* Install one regex as either a filter or a notrace pattern on @ops. */
static int ftrace_function_set_regexp(struct ftrace_ops *ops, int filter,
				      int reset, char *re, int len)
{
	return filter ? ftrace_set_filter(ops, re, len, reset)
		      : ftrace_set_notrace(ops, re, len, reset);
}
2114
/*
 * Apply one 'ip' comparison value (which may itself hold several patterns)
 * as function filter/notrace patterns. Returns 0 or the first error.
 */
static int __ftrace_function_set_filter(int filter, char *buf, int len,
					struct function_filter_data *data)
{
	int i, re_cnt, ret = -EINVAL;
	int *reset;
	char **re;

	reset = filter ? &data->first_filter : &data->first_notrace;

	/*
	 * The 'ip' field could have multiple filters set, separated
	 * either by space or comma. We first cut the filter and apply
	 * all pieces separately.
	 */
	re = ftrace_function_filter_re(buf, len, &re_cnt);
	if (!re)
		return -EINVAL;

	for (i = 0; i < re_cnt; i++) {
		ret = ftrace_function_set_regexp(data->ops, filter, *reset,
						 re[i], strlen(re[i]));
		if (ret)
			break;

		/* only the very first pattern of each kind resets old state */
		if (*reset)
			*reset = 0;
	}

	argv_free(re);
	return ret;
}
2146
2147 static int ftrace_function_check_pred(struct filter_pred *pred, int leaf)
2148 {
2149 struct ftrace_event_field *field = pred->field;
2150
2151 if (leaf) {
2152 /*
2153 * Check the leaf predicate for function trace, verify:
2154 * - only '==' and '!=' is used
2155 * - the 'ip' field is used
2156 */
2157 if ((pred->op != OP_EQ) && (pred->op != OP_NE))
2158 return -EINVAL;
2159
2160 if (strcmp(field->name, "ip"))
2161 return -EINVAL;
2162 } else {
2163 /*
2164 * Check the non leaf predicate for function trace, verify:
2165 * - only '||' is used
2166 */
2167 if (pred->op != OP_OR)
2168 return -EINVAL;
2169 }
2170
2171 return 0;
2172 }
2173
/*
 * walk_pred_tree() callback: validate each node for function tracing and,
 * for leaves, install the pattern ('==' -> filter, '!=' -> notrace).
 */
static int ftrace_function_set_filter_cb(enum move_type move,
					 struct filter_pred *pred,
					 int *err, void *data)
{
	/* Checking the node is valid for function trace. */
	if ((move != MOVE_DOWN) ||
	    (pred->left != FILTER_PRED_INVALID)) {
		/* non-leaf (or revisit): just validate the operator */
		*err = ftrace_function_check_pred(pred, 0);
	} else {
		*err = ftrace_function_check_pred(pred, 1);
		if (*err)
			return WALK_PRED_ABORT;

		*err = __ftrace_function_set_filter(pred->op == OP_EQ,
						    pred->regex.pattern,
						    pred->regex.len,
						    data);
	}

	return (*err) ? WALK_PRED_ABORT : WALK_PRED_DEFAULT;
}
2195
2196 static int ftrace_function_set_filter(struct perf_event *event,
2197 struct event_filter *filter)
2198 {
2199 struct function_filter_data data = {
2200 .first_filter = 1,
2201 .first_notrace = 1,
2202 .ops = &event->ftrace_ops,
2203 };
2204
2205 return walk_pred_tree(filter->preds, filter->root,
2206 ftrace_function_set_filter_cb, &data);
2207 }
2208 #else
/* Function tracer not built in: filtering on function events unsupported. */
static int ftrace_function_set_filter(struct perf_event *event,
				      struct event_filter *filter)
{
	return -ENODEV;
}
2214 #endif /* CONFIG_FUNCTION_TRACER */
2215
/*
 * ftrace_profile_set_filter - attach a filter string to a perf event
 *
 * For function events the parsed filter is translated into ftrace
 * filter/notrace patterns and then freed; for all other events the
 * filter object itself is attached to @event. Fails with -EEXIST if
 * the event already has a filter. Serialized by event_mutex.
 */
int ftrace_profile_set_filter(struct perf_event *event, int event_id,
			      char *filter_str)
{
	int err;
	struct event_filter *filter;
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);

	call = event->tp_event;

	err = -EINVAL;
	if (!call)
		goto out_unlock;

	err = -EEXIST;
	if (event->filter)
		goto out_unlock;

	err = create_filter(call, filter_str, false, &filter);
	if (err)
		goto free_filter;

	if (ftrace_event_is_function(call))
		err = ftrace_function_set_filter(event, filter);
	else
		event->filter = filter;

free_filter:
	/* function events consume the filter; on error nobody keeps it */
	if (err || ftrace_event_is_function(call))
		__free_filter(filter);

out_unlock:
	mutex_unlock(&event_mutex);

	return err;
}
2253
2254 #endif /* CONFIG_PERF_EVENTS */
2255
2256 #ifdef CONFIG_FTRACE_STARTUP_TEST
2257
2258 #include <linux/types.h>
2259 #include <linux/tracepoint.h>
2260
2261 #define CREATE_TRACE_POINTS
2262 #include "trace_events_filter_test.h"
2263
/*
 * Build one test_filter_data_t entry: expected match result @m, the record
 * field values a..h, and @nvisit, the set of field names whose predicates
 * must NOT be evaluated for this record. FILTER is (re)defined before each
 * group of records below.
 */
#define DATA_REC(m, va, vb, vc, vd, ve, vf, vg, vh, nvisit) \
{ \
	.filter = FILTER, \
	.rec = { .a = va, .b = vb, .c = vc, .d = vd, \
		 .e = ve, .f = vf, .g = vg, .h = vh }, \
	.match = m, \
	.not_visited = nvisit, \
}
#define YES 1
#define NO 0
2274
/*
 * Self-test table: each group redefines FILTER and lists records with the
 * expected match outcome and the predicates that should be short-circuited.
 */
static struct test_filter_data_t {
	char *filter;
	struct ftrace_raw_ftrace_test_filter rec;
	int match;
	char *not_visited;
} test_filter_data[] = {
#define FILTER "a == 1 && b == 1 && c == 1 && d == 1 && " \
	       "e == 1 && f == 1 && g == 1 && h == 1"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, ""),
	DATA_REC(NO, 0, 1, 1, 1, 1, 1, 1, 1, "bcdefgh"),
	DATA_REC(NO, 1, 1, 1, 1, 1, 1, 1, 0, ""),
#undef FILTER
#define FILTER "a == 1 || b == 1 || c == 1 || d == 1 || " \
	       "e == 1 || f == 1 || g == 1 || h == 1"
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""),
	DATA_REC(YES, 0, 0, 0, 0, 0, 0, 0, 1, ""),
	DATA_REC(YES, 1, 0, 0, 0, 0, 0, 0, 0, "bcdefgh"),
#undef FILTER
#define FILTER "(a == 1 || b == 1) && (c == 1 || d == 1) && " \
	       "(e == 1 || f == 1) && (g == 1 || h == 1)"
	DATA_REC(NO, 0, 0, 1, 1, 1, 1, 1, 1, "dfh"),
	DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(YES, 1, 0, 1, 0, 0, 1, 0, 1, "bd"),
	DATA_REC(NO, 1, 0, 1, 0, 0, 1, 0, 0, "bd"),
#undef FILTER
#define FILTER "(a == 1 && b == 1) || (c == 1 && d == 1) || " \
	       "(e == 1 && f == 1) || (g == 1 && h == 1)"
	DATA_REC(YES, 1, 0, 1, 1, 1, 1, 1, 1, "efgh"),
	DATA_REC(YES, 0, 0, 0, 0, 0, 0, 1, 1, ""),
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""),
#undef FILTER
#define FILTER "(a == 1 && b == 1) && (c == 1 && d == 1) && " \
	       "(e == 1 && f == 1) || (g == 1 && h == 1)"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 0, "gh"),
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""),
	DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, ""),
#undef FILTER
#define FILTER "((a == 1 || b == 1) || (c == 1 || d == 1) || " \
	       "(e == 1 || f == 1)) && (g == 1 || h == 1)"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 1, "bcdef"),
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""),
	DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, "h"),
#undef FILTER
#define FILTER "((((((((a == 1) && (b == 1)) || (c == 1)) && (d == 1)) || " \
	       "(e == 1)) && (f == 1)) || (g == 1)) && (h == 1))"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "ceg"),
	DATA_REC(NO, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(NO, 1, 0, 1, 0, 1, 0, 1, 0, ""),
#undef FILTER
#define FILTER "((((((((a == 1) || (b == 1)) && (c == 1)) || (d == 1)) && " \
	       "(e == 1)) || (f == 1)) && (g == 1)) || (h == 1))"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "bdfh"),
	DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(YES, 1, 0, 1, 0, 1, 0, 1, 0, "bdfh"),
};
2330
2331 #undef DATA_REC
2332 #undef FILTER
2333 #undef YES
2334 #undef NO
2335
/* Number of entries in test_filter_data[]. */
#define DATA_CNT (sizeof(test_filter_data)/sizeof(struct test_filter_data_t))

/* Set by test_pred_visited_fn() when a supposedly skipped pred runs. */
static int test_pred_visited;
2339
2340 static int test_pred_visited_fn(struct filter_pred *pred, void *event)
2341 {
2342 struct ftrace_event_field *field = pred->field;
2343
2344 test_pred_visited = 1;
2345 printk(KERN_INFO "\npred visited %s\n", field->name);
2346 return 1;
2347 }
2348
/*
 * walk_pred_tree() callback: for every leaf whose field name appears in
 * the not_visited string (@data), replace its match function with
 * test_pred_visited_fn() so evaluation can be detected.
 */
static int test_walk_pred_cb(enum move_type move, struct filter_pred *pred,
			     int *err, void *data)
{
	char *fields = data;

	if ((move == MOVE_DOWN) &&
	    (pred->left == FILTER_PRED_INVALID)) {
		struct ftrace_event_field *field = pred->field;

		if (!field) {
			WARN(1, "all leafs should have field defined");
			return WALK_PRED_DEFAULT;
		}
		/* test fields are single letters: match on first char */
		if (!strchr(fields, *field->name))
			return WALK_PRED_DEFAULT;

		WARN_ON(!pred->fn);
		pred->fn = test_pred_visited_fn;
	}
	return WALK_PRED_DEFAULT;
}
2370
/*
 * Boot-time self test: build each filter in test_filter_data[], check it
 * matches (or not) as expected, and verify that short-circuited predicates
 * are never evaluated.
 */
static __init int ftrace_test_event_filter(void)
{
	int i;

	printk(KERN_INFO "Testing ftrace filter: ");

	for (i = 0; i < DATA_CNT; i++) {
		struct event_filter *filter = NULL;
		struct test_filter_data_t *d = &test_filter_data[i];
		int err;

		err = create_filter(&event_ftrace_test_filter, d->filter,
				    false, &filter);
		if (err) {
			printk(KERN_INFO
			       "Failed to get filter for '%s', err %d\n",
			       d->filter, err);
			__free_filter(filter);
			break;
		}

		/*
		 * The preemption disabling is not really needed for self
		 * tests, but the rcu dereference will complain without it.
		 */
		preempt_disable();
		if (*d->not_visited)
			walk_pred_tree(filter->preds, filter->root,
				       test_walk_pred_cb,
				       d->not_visited);

		test_pred_visited = 0;
		err = filter_match_preds(filter, &d->rec);
		preempt_enable();

		__free_filter(filter);

		if (test_pred_visited) {
			printk(KERN_INFO
			       "Failed, unwanted pred visited for filter %s\n",
			       d->filter);
			break;
		}

		if (err != d->match) {
			printk(KERN_INFO
			       "Failed to match filter '%s', expected %d\n",
			       d->filter, d->match);
			break;
		}
	}

	/* loop ran to completion only if every entry passed */
	if (i == DATA_CNT)
		printk(KERN_CONT "OK\n");

	return 0;
}
2428
2429 late_initcall(ftrace_test_event_filter);
2430
2431 #endif /* CONFIG_FTRACE_STARTUP_TEST */
This page took 0.078581 seconds and 6 git commands to generate.