Merge branch 'drm-armada-fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm into...
[deliverable/linux.git] / kernel / trace / trace_events_filter.c
1 /*
2 * trace_events_filter - generic event filtering
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
19 */
20
21 #include <linux/module.h>
22 #include <linux/ctype.h>
23 #include <linux/mutex.h>
24 #include <linux/perf_event.h>
25 #include <linux/slab.h>
26
27 #include "trace.h"
28 #include "trace_output.h"
29
30 #define DEFAULT_SYS_FILTER_MESSAGE \
31 "### global filter ###\n" \
32 "# Use this to set filters for multiple events.\n" \
33 "# Only events with the given fields will be affected.\n" \
34 "# If no events are modified, an error message will be displayed here"
35
/*
 * Operator identifiers. Each value doubles as an index into
 * filter_ops[] below, so the two orderings must stay in sync.
 */
enum filter_op_ids {
	OP_OR,
	OP_AND,
	OP_GLOB,
	OP_NE,
	OP_EQ,
	OP_LT,
	OP_LE,
	OP_GT,
	OP_GE,
	OP_BAND,
	OP_NOT,
	OP_NONE,
	OP_OPEN_PAREN,
};
52
/* One parseable operator: its id, its token text and how tightly it binds. */
struct filter_op {
	int id;			/* enum filter_op_ids value */
	char *string;		/* token as written in a filter expression */
	int precedence;		/* higher value binds tighter */
};
58
59 /* Order must be the same as enum filter_op_ids above */
60 static struct filter_op filter_ops[] = {
61 { OP_OR, "||", 1 },
62 { OP_AND, "&&", 2 },
63 { OP_GLOB, "~", 4 },
64 { OP_NE, "!=", 4 },
65 { OP_EQ, "==", 4 },
66 { OP_LT, "<", 5 },
67 { OP_LE, "<=", 5 },
68 { OP_GT, ">", 5 },
69 { OP_GE, ">=", 5 },
70 { OP_BAND, "&", 6 },
71 { OP_NOT, "!", 6 },
72 { OP_NONE, "OP_NONE", 0 },
73 { OP_OPEN_PAREN, "(", 0 },
74 };
75
/* Parse-error codes; each value indexes the matching string in err_text[]. */
enum {
	FILT_ERR_NONE,
	FILT_ERR_INVALID_OP,
	FILT_ERR_UNBALANCED_PAREN,
	FILT_ERR_TOO_MANY_OPERANDS,
	FILT_ERR_OPERAND_TOO_LONG,
	FILT_ERR_FIELD_NOT_FOUND,
	FILT_ERR_ILLEGAL_FIELD_OP,
	FILT_ERR_ILLEGAL_INTVAL,
	FILT_ERR_BAD_SUBSYS_FILTER,
	FILT_ERR_TOO_MANY_PREDS,
	FILT_ERR_MISSING_FIELD,
	FILT_ERR_INVALID_FILTER,
	FILT_ERR_IP_FIELD_ONLY,
	FILT_ERR_ILLEGAL_NOT_OP,
};
92
/* Human-readable messages, in the same order as the FILT_ERR_* enum above. */
static char *err_text[] = {
	"No error",
	"Invalid operator",
	"Unbalanced parens",
	"Too many operands",
	"Operand too long",
	"Field not found",
	"Illegal operation for field type",
	"Illegal integer value",
	"Couldn't find or set field in one of a subsystem's events",
	"Too many terms in predicate expression",
	"Missing field name and/or value",
	"Meaningless filter expression",
	"Only 'ip' field is supported for function trace",
	"Illegal use of '!'",
};
109
110 struct opstack_op {
111 int op;
112 struct list_head list;
113 };
114
115 struct postfix_elt {
116 int op;
117 char *operand;
118 struct list_head list;
119 };
120
121 struct filter_parse_state {
122 struct filter_op *ops;
123 struct list_head opstack;
124 struct list_head postfix;
125 int lasterr;
126 int lasterr_pos;
127
128 struct {
129 char *string;
130 unsigned int cnt;
131 unsigned int tail;
132 } infix;
133
134 struct {
135 char string[MAX_FILTER_STR_VAL];
136 int pos;
137 unsigned int tail;
138 } operand;
139 };
140
/* Downward-growing stack of predicate pointers used while building the tree. */
struct pred_stack {
	struct filter_pred **preds;
	int index;		/* next free slot, counting down toward 0 */
};
145
/*
 * Generate filter_pred_<type>() comparison callbacks for the ordered
 * operators. The final "!!match == !pred->not" folds in negation:
 * if not of not match is equal to not of not, then it is a match.
 */
#define DEFINE_COMPARISON_PRED(type)					\
static int filter_pred_##type(struct filter_pred *pred, void *event)	\
{									\
	type *addr = (type *)(event + pred->offset);			\
	type val = (type)pred->val;					\
	int match = 0;							\
									\
	switch (pred->op) {						\
	case OP_LT:							\
		match = (*addr < val);					\
		break;							\
	case OP_LE:							\
		match = (*addr <= val);					\
		break;							\
	case OP_GT:							\
		match = (*addr > val);					\
		break;							\
	case OP_GE:							\
		match = (*addr >= val);					\
		break;							\
	case OP_BAND:							\
		match = (*addr & val);					\
		break;							\
	default:							\
		break;							\
	}								\
									\
	return !!match == !pred->not;					\
}
176
177 #define DEFINE_EQUALITY_PRED(size) \
178 static int filter_pred_##size(struct filter_pred *pred, void *event) \
179 { \
180 u##size *addr = (u##size *)(event + pred->offset); \
181 u##size val = (u##size)pred->val; \
182 int match; \
183 \
184 match = (val == *addr) ^ pred->not; \
185 \
186 return match; \
187 }
188
189 DEFINE_COMPARISON_PRED(s64);
190 DEFINE_COMPARISON_PRED(u64);
191 DEFINE_COMPARISON_PRED(s32);
192 DEFINE_COMPARISON_PRED(u32);
193 DEFINE_COMPARISON_PRED(s16);
194 DEFINE_COMPARISON_PRED(u16);
195 DEFINE_COMPARISON_PRED(s8);
196 DEFINE_COMPARISON_PRED(u8);
197
198 DEFINE_EQUALITY_PRED(64);
199 DEFINE_EQUALITY_PRED(32);
200 DEFINE_EQUALITY_PRED(16);
201 DEFINE_EQUALITY_PRED(8);
202
203 /* Filter predicate for fixed sized arrays of characters */
204 static int filter_pred_string(struct filter_pred *pred, void *event)
205 {
206 char *addr = (char *)(event + pred->offset);
207 int cmp, match;
208
209 cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len);
210
211 match = cmp ^ pred->not;
212
213 return match;
214 }
215
216 /* Filter predicate for char * pointers */
217 static int filter_pred_pchar(struct filter_pred *pred, void *event)
218 {
219 char **addr = (char **)(event + pred->offset);
220 int cmp, match;
221 int len = strlen(*addr) + 1; /* including tailing '\0' */
222
223 cmp = pred->regex.match(*addr, &pred->regex, len);
224
225 match = cmp ^ pred->not;
226
227 return match;
228 }
229
230 /*
231 * Filter predicate for dynamic sized arrays of characters.
232 * These are implemented through a list of strings at the end
233 * of the entry.
234 * Also each of these strings have a field in the entry which
235 * contains its offset from the beginning of the entry.
236 * We have then first to get this field, dereference it
237 * and add it to the address of the entry, and at last we have
238 * the address of the string.
239 */
240 static int filter_pred_strloc(struct filter_pred *pred, void *event)
241 {
242 u32 str_item = *(u32 *)(event + pred->offset);
243 int str_loc = str_item & 0xffff;
244 int str_len = str_item >> 16;
245 char *addr = (char *)(event + str_loc);
246 int cmp, match;
247
248 cmp = pred->regex.match(addr, &pred->regex, str_len);
249
250 match = cmp ^ pred->not;
251
252 return match;
253 }
254
/* Placeholder predicate: never matches. Installed until a real fn is set. */
static int filter_pred_none(struct filter_pred *pred, void *event)
{
	return 0;
}
259
260 /*
261 * regex_match_foo - Basic regex callbacks
262 *
263 * @str: the string to be searched
264 * @r: the regex structure containing the pattern string
265 * @len: the length of the string to be searched (including '\0')
266 *
267 * Note:
268 * - @str might not be NULL-terminated if it's of type DYN_STRING
269 * or STATIC_STRING
270 */
271
272 static int regex_match_full(char *str, struct regex *r, int len)
273 {
274 if (strncmp(str, r->pattern, len) == 0)
275 return 1;
276 return 0;
277 }
278
279 static int regex_match_front(char *str, struct regex *r, int len)
280 {
281 if (strncmp(str, r->pattern, r->len) == 0)
282 return 1;
283 return 0;
284 }
285
286 static int regex_match_middle(char *str, struct regex *r, int len)
287 {
288 if (strnstr(str, r->pattern, len))
289 return 1;
290 return 0;
291 }
292
293 static int regex_match_end(char *str, struct regex *r, int len)
294 {
295 int strlen = len - 1;
296
297 if (strlen >= r->len &&
298 memcmp(str + strlen - r->len, r->pattern, r->len) == 0)
299 return 1;
300 return 0;
301 }
302
303 /**
304 * filter_parse_regex - parse a basic regex
305 * @buff: the raw regex
306 * @len: length of the regex
307 * @search: will point to the beginning of the string to compare
308 * @not: tell whether the match will have to be inverted
309 *
310 * This passes in a buffer containing a regex and this function will
311 * set search to point to the search part of the buffer and
312 * return the type of search it is (see enum above).
313 * This does modify buff.
314 *
315 * Returns enum type.
316 * search returns the pointer to use for comparison.
317 * not returns 1 if buff started with a '!'
318 * 0 otherwise.
319 */
320 enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
321 {
322 int type = MATCH_FULL;
323 int i;
324
325 if (buff[0] == '!') {
326 *not = 1;
327 buff++;
328 len--;
329 } else
330 *not = 0;
331
332 *search = buff;
333
334 for (i = 0; i < len; i++) {
335 if (buff[i] == '*') {
336 if (!i) {
337 *search = buff + 1;
338 type = MATCH_END_ONLY;
339 } else {
340 if (type == MATCH_END_ONLY)
341 type = MATCH_MIDDLE_ONLY;
342 else
343 type = MATCH_FRONT_ONLY;
344 buff[i] = 0;
345 break;
346 }
347 }
348 }
349
350 return type;
351 }
352
353 static void filter_build_regex(struct filter_pred *pred)
354 {
355 struct regex *r = &pred->regex;
356 char *search;
357 enum regex_type type = MATCH_FULL;
358 int not = 0;
359
360 if (pred->op == OP_GLOB) {
361 type = filter_parse_regex(r->pattern, r->len, &search, &not);
362 r->len = strlen(search);
363 memmove(r->pattern, search, r->len+1);
364 }
365
366 switch (type) {
367 case MATCH_FULL:
368 r->match = regex_match_full;
369 break;
370 case MATCH_FRONT_ONLY:
371 r->match = regex_match_front;
372 break;
373 case MATCH_MIDDLE_ONLY:
374 r->match = regex_match_middle;
375 break;
376 case MATCH_END_ONLY:
377 r->match = regex_match_end;
378 break;
379 }
380
381 pred->not ^= not;
382 }
383
384 enum move_type {
385 MOVE_DOWN,
386 MOVE_UP_FROM_LEFT,
387 MOVE_UP_FROM_RIGHT
388 };
389
390 static struct filter_pred *
391 get_pred_parent(struct filter_pred *pred, struct filter_pred *preds,
392 int index, enum move_type *move)
393 {
394 if (pred->parent & FILTER_PRED_IS_RIGHT)
395 *move = MOVE_UP_FROM_RIGHT;
396 else
397 *move = MOVE_UP_FROM_LEFT;
398 pred = &preds[pred->parent & ~FILTER_PRED_IS_RIGHT];
399
400 return pred;
401 }
402
403 enum walk_return {
404 WALK_PRED_ABORT,
405 WALK_PRED_PARENT,
406 WALK_PRED_DEFAULT,
407 };
408
409 typedef int (*filter_pred_walkcb_t) (enum move_type move,
410 struct filter_pred *pred,
411 int *err, void *data);
412
413 static int walk_pred_tree(struct filter_pred *preds,
414 struct filter_pred *root,
415 filter_pred_walkcb_t cb, void *data)
416 {
417 struct filter_pred *pred = root;
418 enum move_type move = MOVE_DOWN;
419 int done = 0;
420
421 if (!preds)
422 return -EINVAL;
423
424 do {
425 int err = 0, ret;
426
427 ret = cb(move, pred, &err, data);
428 if (ret == WALK_PRED_ABORT)
429 return err;
430 if (ret == WALK_PRED_PARENT)
431 goto get_parent;
432
433 switch (move) {
434 case MOVE_DOWN:
435 if (pred->left != FILTER_PRED_INVALID) {
436 pred = &preds[pred->left];
437 continue;
438 }
439 goto get_parent;
440 case MOVE_UP_FROM_LEFT:
441 pred = &preds[pred->right];
442 move = MOVE_DOWN;
443 continue;
444 case MOVE_UP_FROM_RIGHT:
445 get_parent:
446 if (pred == root)
447 break;
448 pred = get_pred_parent(pred, preds,
449 pred->parent,
450 &move);
451 continue;
452 }
453 done = 1;
454 } while (!done);
455
456 /* We are fine. */
457 return 0;
458 }
459
460 /*
461 * A series of AND or ORs where found together. Instead of
462 * climbing up and down the tree branches, an array of the
463 * ops were made in order of checks. We can just move across
464 * the array and short circuit if needed.
465 */
466 static int process_ops(struct filter_pred *preds,
467 struct filter_pred *op, void *rec)
468 {
469 struct filter_pred *pred;
470 int match = 0;
471 int type;
472 int i;
473
474 /*
475 * Micro-optimization: We set type to true if op
476 * is an OR and false otherwise (AND). Then we
477 * just need to test if the match is equal to
478 * the type, and if it is, we can short circuit the
479 * rest of the checks:
480 *
481 * if ((match && op->op == OP_OR) ||
482 * (!match && op->op == OP_AND))
483 * return match;
484 */
485 type = op->op == OP_OR;
486
487 for (i = 0; i < op->val; i++) {
488 pred = &preds[op->ops[i]];
489 if (!WARN_ON_ONCE(!pred->fn))
490 match = pred->fn(pred, rec);
491 if (!!match == type)
492 break;
493 }
494 /* If not of not match is equal to not of not, then it is a match */
495 return !!match == !op->not;
496 }
497
498 struct filter_match_preds_data {
499 struct filter_pred *preds;
500 int match;
501 void *rec;
502 };
503
504 static int filter_match_preds_cb(enum move_type move, struct filter_pred *pred,
505 int *err, void *data)
506 {
507 struct filter_match_preds_data *d = data;
508
509 *err = 0;
510 switch (move) {
511 case MOVE_DOWN:
512 /* only AND and OR have children */
513 if (pred->left != FILTER_PRED_INVALID) {
514 /* If ops is set, then it was folded. */
515 if (!pred->ops)
516 return WALK_PRED_DEFAULT;
517 /* We can treat folded ops as a leaf node */
518 d->match = process_ops(d->preds, pred, d->rec);
519 } else {
520 if (!WARN_ON_ONCE(!pred->fn))
521 d->match = pred->fn(pred, d->rec);
522 }
523
524 return WALK_PRED_PARENT;
525 case MOVE_UP_FROM_LEFT:
526 /*
527 * Check for short circuits.
528 *
529 * Optimization: !!match == (pred->op == OP_OR)
530 * is the same as:
531 * if ((match && pred->op == OP_OR) ||
532 * (!match && pred->op == OP_AND))
533 */
534 if (!!d->match == (pred->op == OP_OR))
535 return WALK_PRED_PARENT;
536 break;
537 case MOVE_UP_FROM_RIGHT:
538 break;
539 }
540
541 return WALK_PRED_DEFAULT;
542 }
543
544 /* return 1 if event matches, 0 otherwise (discard) */
545 int filter_match_preds(struct event_filter *filter, void *rec)
546 {
547 struct filter_pred *preds;
548 struct filter_pred *root;
549 struct filter_match_preds_data data = {
550 /* match is currently meaningless */
551 .match = -1,
552 .rec = rec,
553 };
554 int n_preds, ret;
555
556 /* no filter is considered a match */
557 if (!filter)
558 return 1;
559
560 n_preds = filter->n_preds;
561 if (!n_preds)
562 return 1;
563
564 /*
565 * n_preds, root and filter->preds are protect with preemption disabled.
566 */
567 root = rcu_dereference_sched(filter->root);
568 if (!root)
569 return 1;
570
571 data.preds = preds = rcu_dereference_sched(filter->preds);
572 ret = walk_pred_tree(preds, root, filter_match_preds_cb, &data);
573 WARN_ON(ret);
574 return data.match;
575 }
576 EXPORT_SYMBOL_GPL(filter_match_preds);
577
578 static void parse_error(struct filter_parse_state *ps, int err, int pos)
579 {
580 ps->lasterr = err;
581 ps->lasterr_pos = pos;
582 }
583
584 static void remove_filter_string(struct event_filter *filter)
585 {
586 if (!filter)
587 return;
588
589 kfree(filter->filter_string);
590 filter->filter_string = NULL;
591 }
592
593 static int replace_filter_string(struct event_filter *filter,
594 char *filter_string)
595 {
596 kfree(filter->filter_string);
597 filter->filter_string = kstrdup(filter_string, GFP_KERNEL);
598 if (!filter->filter_string)
599 return -ENOMEM;
600
601 return 0;
602 }
603
604 static int append_filter_string(struct event_filter *filter,
605 char *string)
606 {
607 int newlen;
608 char *new_filter_string;
609
610 BUG_ON(!filter->filter_string);
611 newlen = strlen(filter->filter_string) + strlen(string) + 1;
612 new_filter_string = kmalloc(newlen, GFP_KERNEL);
613 if (!new_filter_string)
614 return -ENOMEM;
615
616 strcpy(new_filter_string, filter->filter_string);
617 strcat(new_filter_string, string);
618 kfree(filter->filter_string);
619 filter->filter_string = new_filter_string;
620
621 return 0;
622 }
623
624 static void append_filter_err(struct filter_parse_state *ps,
625 struct event_filter *filter)
626 {
627 int pos = ps->lasterr_pos;
628 char *buf, *pbuf;
629
630 buf = (char *)__get_free_page(GFP_TEMPORARY);
631 if (!buf)
632 return;
633
634 append_filter_string(filter, "\n");
635 memset(buf, ' ', PAGE_SIZE);
636 if (pos > PAGE_SIZE - 128)
637 pos = 0;
638 buf[pos] = '^';
639 pbuf = &buf[pos] + 1;
640
641 sprintf(pbuf, "\nparse_error: %s\n", err_text[ps->lasterr]);
642 append_filter_string(filter, buf);
643 free_page((unsigned long) buf);
644 }
645
646 static inline struct event_filter *event_filter(struct trace_event_file *file)
647 {
648 if (file->event_call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
649 return file->event_call->filter;
650 else
651 return file->filter;
652 }
653
654 /* caller must hold event_mutex */
655 void print_event_filter(struct trace_event_file *file, struct trace_seq *s)
656 {
657 struct event_filter *filter = event_filter(file);
658
659 if (filter && filter->filter_string)
660 trace_seq_printf(s, "%s\n", filter->filter_string);
661 else
662 trace_seq_puts(s, "none\n");
663 }
664
665 void print_subsystem_event_filter(struct event_subsystem *system,
666 struct trace_seq *s)
667 {
668 struct event_filter *filter;
669
670 mutex_lock(&event_mutex);
671 filter = system->filter;
672 if (filter && filter->filter_string)
673 trace_seq_printf(s, "%s\n", filter->filter_string);
674 else
675 trace_seq_puts(s, DEFAULT_SYS_FILTER_MESSAGE "\n");
676 mutex_unlock(&event_mutex);
677 }
678
679 static int __alloc_pred_stack(struct pred_stack *stack, int n_preds)
680 {
681 stack->preds = kcalloc(n_preds + 1, sizeof(*stack->preds), GFP_KERNEL);
682 if (!stack->preds)
683 return -ENOMEM;
684 stack->index = n_preds;
685 return 0;
686 }
687
688 static void __free_pred_stack(struct pred_stack *stack)
689 {
690 kfree(stack->preds);
691 stack->index = 0;
692 }
693
694 static int __push_pred_stack(struct pred_stack *stack,
695 struct filter_pred *pred)
696 {
697 int index = stack->index;
698
699 if (WARN_ON(index == 0))
700 return -ENOSPC;
701
702 stack->preds[--index] = pred;
703 stack->index = index;
704 return 0;
705 }
706
707 static struct filter_pred *
708 __pop_pred_stack(struct pred_stack *stack)
709 {
710 struct filter_pred *pred;
711 int index = stack->index;
712
713 pred = stack->preds[index++];
714 if (!pred)
715 return NULL;
716
717 stack->index = index;
718 return pred;
719 }
720
721 static int filter_set_pred(struct event_filter *filter,
722 int idx,
723 struct pred_stack *stack,
724 struct filter_pred *src)
725 {
726 struct filter_pred *dest = &filter->preds[idx];
727 struct filter_pred *left;
728 struct filter_pred *right;
729
730 *dest = *src;
731 dest->index = idx;
732
733 if (dest->op == OP_OR || dest->op == OP_AND) {
734 right = __pop_pred_stack(stack);
735 left = __pop_pred_stack(stack);
736 if (!left || !right)
737 return -EINVAL;
738 /*
739 * If both children can be folded
740 * and they are the same op as this op or a leaf,
741 * then this op can be folded.
742 */
743 if (left->index & FILTER_PRED_FOLD &&
744 ((left->op == dest->op && !left->not) ||
745 left->left == FILTER_PRED_INVALID) &&
746 right->index & FILTER_PRED_FOLD &&
747 ((right->op == dest->op && !right->not) ||
748 right->left == FILTER_PRED_INVALID))
749 dest->index |= FILTER_PRED_FOLD;
750
751 dest->left = left->index & ~FILTER_PRED_FOLD;
752 dest->right = right->index & ~FILTER_PRED_FOLD;
753 left->parent = dest->index & ~FILTER_PRED_FOLD;
754 right->parent = dest->index | FILTER_PRED_IS_RIGHT;
755 } else {
756 /*
757 * Make dest->left invalid to be used as a quick
758 * way to know this is a leaf node.
759 */
760 dest->left = FILTER_PRED_INVALID;
761
762 /* All leafs allow folding the parent ops. */
763 dest->index |= FILTER_PRED_FOLD;
764 }
765
766 return __push_pred_stack(stack, dest);
767 }
768
769 static void __free_preds(struct event_filter *filter)
770 {
771 int i;
772
773 if (filter->preds) {
774 for (i = 0; i < filter->n_preds; i++)
775 kfree(filter->preds[i].ops);
776 kfree(filter->preds);
777 filter->preds = NULL;
778 }
779 filter->a_preds = 0;
780 filter->n_preds = 0;
781 }
782
783 static void filter_disable(struct trace_event_file *file)
784 {
785 struct trace_event_call *call = file->event_call;
786
787 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
788 call->flags &= ~TRACE_EVENT_FL_FILTERED;
789 else
790 file->flags &= ~EVENT_FILE_FL_FILTERED;
791 }
792
793 static void __free_filter(struct event_filter *filter)
794 {
795 if (!filter)
796 return;
797
798 __free_preds(filter);
799 kfree(filter->filter_string);
800 kfree(filter);
801 }
802
/* Public wrapper around __free_filter() for users outside this file. */
void free_event_filter(struct event_filter *filter)
{
	__free_filter(filter);
}
807
808 static struct event_filter *__alloc_filter(void)
809 {
810 struct event_filter *filter;
811
812 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
813 return filter;
814 }
815
816 static int __alloc_preds(struct event_filter *filter, int n_preds)
817 {
818 struct filter_pred *pred;
819 int i;
820
821 if (filter->preds)
822 __free_preds(filter);
823
824 filter->preds = kcalloc(n_preds, sizeof(*filter->preds), GFP_KERNEL);
825
826 if (!filter->preds)
827 return -ENOMEM;
828
829 filter->a_preds = n_preds;
830 filter->n_preds = 0;
831
832 for (i = 0; i < n_preds; i++) {
833 pred = &filter->preds[i];
834 pred->fn = filter_pred_none;
835 }
836
837 return 0;
838 }
839
840 static inline void __remove_filter(struct trace_event_file *file)
841 {
842 struct trace_event_call *call = file->event_call;
843
844 filter_disable(file);
845 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
846 remove_filter_string(call->filter);
847 else
848 remove_filter_string(file->filter);
849 }
850
851 static void filter_free_subsystem_preds(struct trace_subsystem_dir *dir,
852 struct trace_array *tr)
853 {
854 struct trace_event_file *file;
855
856 list_for_each_entry(file, &tr->events, list) {
857 if (file->system != dir)
858 continue;
859 __remove_filter(file);
860 }
861 }
862
863 static inline void __free_subsystem_filter(struct trace_event_file *file)
864 {
865 struct trace_event_call *call = file->event_call;
866
867 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) {
868 __free_filter(call->filter);
869 call->filter = NULL;
870 } else {
871 __free_filter(file->filter);
872 file->filter = NULL;
873 }
874 }
875
876 static void filter_free_subsystem_filters(struct trace_subsystem_dir *dir,
877 struct trace_array *tr)
878 {
879 struct trace_event_file *file;
880
881 list_for_each_entry(file, &tr->events, list) {
882 if (file->system != dir)
883 continue;
884 __free_subsystem_filter(file);
885 }
886 }
887
888 static int filter_add_pred(struct filter_parse_state *ps,
889 struct event_filter *filter,
890 struct filter_pred *pred,
891 struct pred_stack *stack)
892 {
893 int err;
894
895 if (WARN_ON(filter->n_preds == filter->a_preds)) {
896 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
897 return -ENOSPC;
898 }
899
900 err = filter_set_pred(filter, filter->n_preds, stack, pred);
901 if (err)
902 return err;
903
904 filter->n_preds++;
905
906 return 0;
907 }
908
909 int filter_assign_type(const char *type)
910 {
911 if (strstr(type, "__data_loc") && strstr(type, "char"))
912 return FILTER_DYN_STRING;
913
914 if (strchr(type, '[') && strstr(type, "char"))
915 return FILTER_STATIC_STRING;
916
917 return FILTER_OTHER;
918 }
919
920 static bool is_function_field(struct ftrace_event_field *field)
921 {
922 return field->filter_type == FILTER_TRACE_FN;
923 }
924
925 static bool is_string_field(struct ftrace_event_field *field)
926 {
927 return field->filter_type == FILTER_DYN_STRING ||
928 field->filter_type == FILTER_STATIC_STRING ||
929 field->filter_type == FILTER_PTR_STRING;
930 }
931
932 static int is_legal_op(struct ftrace_event_field *field, int op)
933 {
934 if (is_string_field(field) &&
935 (op != OP_EQ && op != OP_NE && op != OP_GLOB))
936 return 0;
937 if (!is_string_field(field) && op == OP_GLOB)
938 return 0;
939
940 return 1;
941 }
942
943 static filter_pred_fn_t select_comparison_fn(int op, int field_size,
944 int field_is_signed)
945 {
946 filter_pred_fn_t fn = NULL;
947
948 switch (field_size) {
949 case 8:
950 if (op == OP_EQ || op == OP_NE)
951 fn = filter_pred_64;
952 else if (field_is_signed)
953 fn = filter_pred_s64;
954 else
955 fn = filter_pred_u64;
956 break;
957 case 4:
958 if (op == OP_EQ || op == OP_NE)
959 fn = filter_pred_32;
960 else if (field_is_signed)
961 fn = filter_pred_s32;
962 else
963 fn = filter_pred_u32;
964 break;
965 case 2:
966 if (op == OP_EQ || op == OP_NE)
967 fn = filter_pred_16;
968 else if (field_is_signed)
969 fn = filter_pred_s16;
970 else
971 fn = filter_pred_u16;
972 break;
973 case 1:
974 if (op == OP_EQ || op == OP_NE)
975 fn = filter_pred_8;
976 else if (field_is_signed)
977 fn = filter_pred_s8;
978 else
979 fn = filter_pred_u8;
980 break;
981 }
982
983 return fn;
984 }
985
986 static int init_pred(struct filter_parse_state *ps,
987 struct ftrace_event_field *field,
988 struct filter_pred *pred)
989
990 {
991 filter_pred_fn_t fn = filter_pred_none;
992 unsigned long long val;
993 int ret;
994
995 pred->offset = field->offset;
996
997 if (!is_legal_op(field, pred->op)) {
998 parse_error(ps, FILT_ERR_ILLEGAL_FIELD_OP, 0);
999 return -EINVAL;
1000 }
1001
1002 if (is_string_field(field)) {
1003 filter_build_regex(pred);
1004
1005 if (field->filter_type == FILTER_STATIC_STRING) {
1006 fn = filter_pred_string;
1007 pred->regex.field_len = field->size;
1008 } else if (field->filter_type == FILTER_DYN_STRING)
1009 fn = filter_pred_strloc;
1010 else
1011 fn = filter_pred_pchar;
1012 } else if (is_function_field(field)) {
1013 if (strcmp(field->name, "ip")) {
1014 parse_error(ps, FILT_ERR_IP_FIELD_ONLY, 0);
1015 return -EINVAL;
1016 }
1017 } else {
1018 if (field->is_signed)
1019 ret = kstrtoll(pred->regex.pattern, 0, &val);
1020 else
1021 ret = kstrtoull(pred->regex.pattern, 0, &val);
1022 if (ret) {
1023 parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
1024 return -EINVAL;
1025 }
1026 pred->val = val;
1027
1028 fn = select_comparison_fn(pred->op, field->size,
1029 field->is_signed);
1030 if (!fn) {
1031 parse_error(ps, FILT_ERR_INVALID_OP, 0);
1032 return -EINVAL;
1033 }
1034 }
1035
1036 if (pred->op == OP_NE)
1037 pred->not ^= 1;
1038
1039 pred->fn = fn;
1040 return 0;
1041 }
1042
1043 static void parse_init(struct filter_parse_state *ps,
1044 struct filter_op *ops,
1045 char *infix_string)
1046 {
1047 memset(ps, '\0', sizeof(*ps));
1048
1049 ps->infix.string = infix_string;
1050 ps->infix.cnt = strlen(infix_string);
1051 ps->ops = ops;
1052
1053 INIT_LIST_HEAD(&ps->opstack);
1054 INIT_LIST_HEAD(&ps->postfix);
1055 }
1056
1057 static char infix_next(struct filter_parse_state *ps)
1058 {
1059 if (!ps->infix.cnt)
1060 return 0;
1061
1062 ps->infix.cnt--;
1063
1064 return ps->infix.string[ps->infix.tail++];
1065 }
1066
1067 static char infix_peek(struct filter_parse_state *ps)
1068 {
1069 if (ps->infix.tail == strlen(ps->infix.string))
1070 return 0;
1071
1072 return ps->infix.string[ps->infix.tail];
1073 }
1074
1075 static void infix_advance(struct filter_parse_state *ps)
1076 {
1077 if (!ps->infix.cnt)
1078 return;
1079
1080 ps->infix.cnt--;
1081 ps->infix.tail++;
1082 }
1083
1084 static inline int is_precedence_lower(struct filter_parse_state *ps,
1085 int a, int b)
1086 {
1087 return ps->ops[a].precedence < ps->ops[b].precedence;
1088 }
1089
1090 static inline int is_op_char(struct filter_parse_state *ps, char c)
1091 {
1092 int i;
1093
1094 for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1095 if (ps->ops[i].string[0] == c)
1096 return 1;
1097 }
1098
1099 return 0;
1100 }
1101
1102 static int infix_get_op(struct filter_parse_state *ps, char firstc)
1103 {
1104 char nextc = infix_peek(ps);
1105 char opstr[3];
1106 int i;
1107
1108 opstr[0] = firstc;
1109 opstr[1] = nextc;
1110 opstr[2] = '\0';
1111
1112 for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1113 if (!strcmp(opstr, ps->ops[i].string)) {
1114 infix_advance(ps);
1115 return ps->ops[i].id;
1116 }
1117 }
1118
1119 opstr[1] = '\0';
1120
1121 for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1122 if (!strcmp(opstr, ps->ops[i].string))
1123 return ps->ops[i].id;
1124 }
1125
1126 return OP_NONE;
1127 }
1128
1129 static inline void clear_operand_string(struct filter_parse_state *ps)
1130 {
1131 memset(ps->operand.string, '\0', MAX_FILTER_STR_VAL);
1132 ps->operand.tail = 0;
1133 }
1134
1135 static inline int append_operand_char(struct filter_parse_state *ps, char c)
1136 {
1137 if (ps->operand.tail == MAX_FILTER_STR_VAL - 1)
1138 return -EINVAL;
1139
1140 ps->operand.string[ps->operand.tail++] = c;
1141
1142 return 0;
1143 }
1144
1145 static int filter_opstack_push(struct filter_parse_state *ps, int op)
1146 {
1147 struct opstack_op *opstack_op;
1148
1149 opstack_op = kmalloc(sizeof(*opstack_op), GFP_KERNEL);
1150 if (!opstack_op)
1151 return -ENOMEM;
1152
1153 opstack_op->op = op;
1154 list_add(&opstack_op->list, &ps->opstack);
1155
1156 return 0;
1157 }
1158
1159 static int filter_opstack_empty(struct filter_parse_state *ps)
1160 {
1161 return list_empty(&ps->opstack);
1162 }
1163
1164 static int filter_opstack_top(struct filter_parse_state *ps)
1165 {
1166 struct opstack_op *opstack_op;
1167
1168 if (filter_opstack_empty(ps))
1169 return OP_NONE;
1170
1171 opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
1172
1173 return opstack_op->op;
1174 }
1175
1176 static int filter_opstack_pop(struct filter_parse_state *ps)
1177 {
1178 struct opstack_op *opstack_op;
1179 int op;
1180
1181 if (filter_opstack_empty(ps))
1182 return OP_NONE;
1183
1184 opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
1185 op = opstack_op->op;
1186 list_del(&opstack_op->list);
1187
1188 kfree(opstack_op);
1189
1190 return op;
1191 }
1192
/* Pop (and free) everything left on the operator stack. */
static void filter_opstack_clear(struct filter_parse_state *ps)
{
	while (!filter_opstack_empty(ps))
		filter_opstack_pop(ps);
}
1198
1199 static char *curr_operand(struct filter_parse_state *ps)
1200 {
1201 return ps->operand.string;
1202 }
1203
1204 static int postfix_append_operand(struct filter_parse_state *ps, char *operand)
1205 {
1206 struct postfix_elt *elt;
1207
1208 elt = kmalloc(sizeof(*elt), GFP_KERNEL);
1209 if (!elt)
1210 return -ENOMEM;
1211
1212 elt->op = OP_NONE;
1213 elt->operand = kstrdup(operand, GFP_KERNEL);
1214 if (!elt->operand) {
1215 kfree(elt);
1216 return -ENOMEM;
1217 }
1218
1219 list_add_tail(&elt->list, &ps->postfix);
1220
1221 return 0;
1222 }
1223
1224 static int postfix_append_op(struct filter_parse_state *ps, int op)
1225 {
1226 struct postfix_elt *elt;
1227
1228 elt = kmalloc(sizeof(*elt), GFP_KERNEL);
1229 if (!elt)
1230 return -ENOMEM;
1231
1232 elt->op = op;
1233 elt->operand = NULL;
1234
1235 list_add_tail(&elt->list, &ps->postfix);
1236
1237 return 0;
1238 }
1239
1240 static void postfix_clear(struct filter_parse_state *ps)
1241 {
1242 struct postfix_elt *elt;
1243
1244 while (!list_empty(&ps->postfix)) {
1245 elt = list_first_entry(&ps->postfix, struct postfix_elt, list);
1246 list_del(&elt->list);
1247 kfree(elt->operand);
1248 kfree(elt);
1249 }
1250 }
1251
/*
 * Convert the infix filter string held in @ps into a postfix (reverse
 * polish) list of operands and operators, using a shunting-yard style
 * operator stack.  Returns 0 on success or -EINVAL on a parse error;
 * error details are recorded via parse_error().
 */
static int filter_parse(struct filter_parse_state *ps)
{
	int in_string = 0;
	int op, top_op;
	char ch;

	while ((ch = infix_next(ps))) {
		/* Quotes toggle string mode; quoted chars are operand text */
		if (ch == '"') {
			in_string ^= 1;
			continue;
		}

		if (in_string)
			goto parse_operand;

		if (isspace(ch))
			continue;

		if (is_op_char(ps, ch)) {
			op = infix_get_op(ps, ch);
			if (op == OP_NONE) {
				parse_error(ps, FILT_ERR_INVALID_OP, 0);
				return -EINVAL;
			}

			/* Flush the operand collected so far to the output */
			if (strlen(curr_operand(ps))) {
				postfix_append_operand(ps, curr_operand(ps));
				clear_operand_string(ps);
			}

			/*
			 * Pop stacked operators of higher-or-equal
			 * precedence to the output before pushing @op.
			 */
			while (!filter_opstack_empty(ps)) {
				top_op = filter_opstack_top(ps);
				if (!is_precedence_lower(ps, top_op, op)) {
					top_op = filter_opstack_pop(ps);
					postfix_append_op(ps, top_op);
					continue;
				}
				break;
			}

			filter_opstack_push(ps, op);
			continue;
		}

		if (ch == '(') {
			filter_opstack_push(ps, OP_OPEN_PAREN);
			continue;
		}

		if (ch == ')') {
			if (strlen(curr_operand(ps))) {
				postfix_append_operand(ps, curr_operand(ps));
				clear_operand_string(ps);
			}

			/* Pop operators until the matching open paren */
			top_op = filter_opstack_pop(ps);
			while (top_op != OP_NONE) {
				if (top_op == OP_OPEN_PAREN)
					break;
				postfix_append_op(ps, top_op);
				top_op = filter_opstack_pop(ps);
			}
			if (top_op == OP_NONE) {
				parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
				return -EINVAL;
			}
			continue;
		}
parse_operand:
		if (append_operand_char(ps, ch)) {
			parse_error(ps, FILT_ERR_OPERAND_TOO_LONG, 0);
			return -EINVAL;
		}
	}

	/* Flush a trailing operand and drain the operator stack */
	if (strlen(curr_operand(ps)))
		postfix_append_operand(ps, curr_operand(ps));

	while (!filter_opstack_empty(ps)) {
		top_op = filter_opstack_pop(ps);
		if (top_op == OP_NONE)
			break;
		if (top_op == OP_OPEN_PAREN) {
			parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
			return -EINVAL;
		}
		postfix_append_op(ps, top_op);
	}

	return 0;
}
1343
/*
 * Build a filter_pred for one postfix operator and its operands.
 *
 * NOTE: returns a pointer to a function-local static pred that is
 * reused on every call, so callers must consume/copy it before the
 * next call (presumably serialized by event_mutex — see the
 * apply_*_filter paths).  Returns NULL on error, with the reason
 * recorded via parse_error().
 */
static struct filter_pred *create_pred(struct filter_parse_state *ps,
				       struct trace_event_call *call,
				       int op, char *operand1, char *operand2)
{
	struct ftrace_event_field *field;
	static struct filter_pred pred;

	memset(&pred, 0, sizeof(pred));
	pred.op = op;

	/* Logical ops join other preds and need no field/value */
	if (op == OP_AND || op == OP_OR)
		return &pred;

	if (!operand1 || !operand2) {
		parse_error(ps, FILT_ERR_MISSING_FIELD, 0);
		return NULL;
	}

	/* operand1 names the event field; operand2 is the compared value */
	field = trace_find_event_field(call, operand1);
	if (!field) {
		parse_error(ps, FILT_ERR_FIELD_NOT_FOUND, 0);
		return NULL;
	}

	strcpy(pred.regex.pattern, operand2);
	pred.regex.len = strlen(pred.regex.pattern);
	pred.field = field;
	return init_pred(ps, field, &pred) ? NULL : &pred;
}
1373
/*
 * Sanity-check the postfix list: simulate its evaluation and verify
 * that it leaves exactly one value on the stack, contains at least
 * one comparison, and does not have more binary logical ops than
 * comparisons.  Returns 0 if valid, -EINVAL otherwise.
 */
static int check_preds(struct filter_parse_state *ps)
{
	int n_normal_preds = 0, n_logical_preds = 0;
	struct postfix_elt *elt;
	int cnt = 0;	/* simulated operand-stack depth */

	list_for_each_entry(elt, &ps->postfix, list) {
		if (elt->op == OP_NONE) {
			/* operand: pushes one value */
			cnt++;
			continue;
		}

		if (elt->op == OP_AND || elt->op == OP_OR) {
			/* binary logical op: pops two, pushes one */
			n_logical_preds++;
			cnt--;
			continue;
		}
		/* OP_NOT is unary and leaves the depth unchanged */
		if (elt->op != OP_NOT)
			cnt--;
		n_normal_preds++;
		/* all ops should have operands */
		if (cnt < 0)
			break;
	}

	if (cnt != 1 || !n_normal_preds || n_logical_preds >= n_normal_preds) {
		parse_error(ps, FILT_ERR_INVALID_FILTER, 0);
		return -EINVAL;
	}

	return 0;
}
1406
1407 static int count_preds(struct filter_parse_state *ps)
1408 {
1409 struct postfix_elt *elt;
1410 int n_preds = 0;
1411
1412 list_for_each_entry(elt, &ps->postfix, list) {
1413 if (elt->op == OP_NONE)
1414 continue;
1415 n_preds++;
1416 }
1417
1418 return n_preds;
1419 }
1420
/* Bookkeeping for check_pred_tree_cb(): visits seen vs. allowed max. */
struct check_pred_data {
	int count;	/* nodes visited so far during the walk */
	int max;	/* upper bound before the tree is declared broken */
};
1425
/*
 * Walk callback: count every node visit and abort with -EINVAL if
 * the walk visits more nodes than a finite tree possibly could —
 * that would indicate a malformed (looping) tree.
 */
static int check_pred_tree_cb(enum move_type move, struct filter_pred *pred,
			      int *err, void *data)
{
	struct check_pred_data *d = data;

	if (WARN_ON(d->count++ > d->max)) {
		*err = -EINVAL;
		return WALK_PRED_ABORT;
	}
	return WALK_PRED_DEFAULT;
}
1437
/*
 * The tree is walked at filtering of an event. If the tree is not correctly
 * built, it may cause an infinite loop. Check here that the tree does
 * indeed terminate.
 */
static int check_pred_tree(struct event_filter *filter,
			   struct filter_pred *root)
{
	struct check_pred_data data = {
		/*
		 * The max that we can hit a node is three times.
		 * Once going down, once coming up from left, and
		 * once coming up from right. This is more than enough
		 * since leafs are only hit a single time.
		 */
		.max = 3 * filter->n_preds,
		.count = 0,
	};

	/* Returns 0 if the walk terminated within the visit budget */
	return walk_pred_tree(filter->preds, root,
			      check_pred_tree_cb, &data);
}
1460
1461 static int count_leafs_cb(enum move_type move, struct filter_pred *pred,
1462 int *err, void *data)
1463 {
1464 int *count = data;
1465
1466 if ((move == MOVE_DOWN) &&
1467 (pred->left == FILTER_PRED_INVALID))
1468 (*count)++;
1469
1470 return WALK_PRED_DEFAULT;
1471 }
1472
1473 static int count_leafs(struct filter_pred *preds, struct filter_pred *root)
1474 {
1475 int count = 0, ret;
1476
1477 ret = walk_pred_tree(preds, root, count_leafs_cb, &count);
1478 WARN_ON(ret);
1479 return count;
1480 }
1481
/* State for fold_pred_cb(): fills root->ops[] with leaf indices. */
struct fold_pred_data {
	struct filter_pred *root;	/* node whose ops[] is being filled */
	int count;			/* leaves recorded so far */
	int children;			/* capacity of root->ops[] */
};
1487
/*
 * Walk callback for fold_pred(): record the index of every leaf
 * under the fold root into root->ops[], clearing the leaf's fold
 * flag.  Aborts with -EINVAL if more leaves show up than were
 * counted when ops[] was sized.
 */
static int fold_pred_cb(enum move_type move, struct filter_pred *pred,
			int *err, void *data)
{
	struct fold_pred_data *d = data;
	struct filter_pred *root = d->root;

	/* Only act on leaves, and only on the way down */
	if (move != MOVE_DOWN)
		return WALK_PRED_DEFAULT;
	if (pred->left != FILTER_PRED_INVALID)
		return WALK_PRED_DEFAULT;

	if (WARN_ON(d->count == d->children)) {
		*err = -EINVAL;
		return WALK_PRED_ABORT;
	}

	pred->index &= ~FILTER_PRED_FOLD;
	root->ops[d->count++] = pred->index;
	return WALK_PRED_DEFAULT;
}
1508
/*
 * Fold the subtree rooted at @root: allocate root->ops[] sized to the
 * number of leaves and fill it with their indices via fold_pred_cb(),
 * so the whole group can later be evaluated as a flat array.
 * Returns 0 on success or -ENOMEM/-EINVAL.
 */
static int fold_pred(struct filter_pred *preds, struct filter_pred *root)
{
	struct fold_pred_data data = {
		.root  = root,
		.count = 0,
	};
	int children;

	/* No need to keep the fold flag */
	root->index &= ~FILTER_PRED_FOLD;

	/* If the root is a leaf then do nothing */
	if (root->left == FILTER_PRED_INVALID)
		return 0;

	/* count the children */
	children = count_leafs(preds, &preds[root->left]);
	children += count_leafs(preds, &preds[root->right]);

	root->ops = kcalloc(children, sizeof(*root->ops), GFP_KERNEL);
	if (!root->ops)
		return -ENOMEM;

	root->val = children;
	data.children = children;
	return walk_pred_tree(preds, root, fold_pred_cb, &data);
}
1536
/*
 * Walk callback for fold_pred_tree(): when a node marked foldable is
 * reached on the way down, fold its subtree and skip descending into
 * it (its leaves are now handled via the root's ops[] array).
 */
static int fold_pred_tree_cb(enum move_type move, struct filter_pred *pred,
			     int *err, void *data)
{
	struct filter_pred *preds = data;

	if (move != MOVE_DOWN)
		return WALK_PRED_DEFAULT;
	if (!(pred->index & FILTER_PRED_FOLD))
		return WALK_PRED_DEFAULT;

	*err = fold_pred(preds, pred);
	if (*err)
		return WALK_PRED_ABORT;

	/* everything below is folded, continue with parent */
	return WALK_PRED_PARENT;
}
1554
/*
 * To optimize the processing of the ops, if we have several "ors" or
 * "ands" together, we can put them in an array and process them all
 * together speeding up the filter logic.
 */
static int fold_pred_tree(struct event_filter *filter,
			  struct filter_pred *root)
{
	/* Folds every FILTER_PRED_FOLD-marked subtree under @root */
	return walk_pred_tree(filter->preds, root, fold_pred_tree_cb,
			      filter->preds);
}
1566
1567 static int replace_preds(struct trace_event_call *call,
1568 struct event_filter *filter,
1569 struct filter_parse_state *ps,
1570 bool dry_run)
1571 {
1572 char *operand1 = NULL, *operand2 = NULL;
1573 struct filter_pred *pred;
1574 struct filter_pred *root;
1575 struct postfix_elt *elt;
1576 struct pred_stack stack = { }; /* init to NULL */
1577 int err;
1578 int n_preds = 0;
1579
1580 n_preds = count_preds(ps);
1581 if (n_preds >= MAX_FILTER_PRED) {
1582 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1583 return -ENOSPC;
1584 }
1585
1586 err = check_preds(ps);
1587 if (err)
1588 return err;
1589
1590 if (!dry_run) {
1591 err = __alloc_pred_stack(&stack, n_preds);
1592 if (err)
1593 return err;
1594 err = __alloc_preds(filter, n_preds);
1595 if (err)
1596 goto fail;
1597 }
1598
1599 n_preds = 0;
1600 list_for_each_entry(elt, &ps->postfix, list) {
1601 if (elt->op == OP_NONE) {
1602 if (!operand1)
1603 operand1 = elt->operand;
1604 else if (!operand2)
1605 operand2 = elt->operand;
1606 else {
1607 parse_error(ps, FILT_ERR_TOO_MANY_OPERANDS, 0);
1608 err = -EINVAL;
1609 goto fail;
1610 }
1611 continue;
1612 }
1613
1614 if (elt->op == OP_NOT) {
1615 if (!n_preds || operand1 || operand2) {
1616 parse_error(ps, FILT_ERR_ILLEGAL_NOT_OP, 0);
1617 err = -EINVAL;
1618 goto fail;
1619 }
1620 if (!dry_run)
1621 filter->preds[n_preds - 1].not ^= 1;
1622 continue;
1623 }
1624
1625 if (WARN_ON(n_preds++ == MAX_FILTER_PRED)) {
1626 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1627 err = -ENOSPC;
1628 goto fail;
1629 }
1630
1631 pred = create_pred(ps, call, elt->op, operand1, operand2);
1632 if (!pred) {
1633 err = -EINVAL;
1634 goto fail;
1635 }
1636
1637 if (!dry_run) {
1638 err = filter_add_pred(ps, filter, pred, &stack);
1639 if (err)
1640 goto fail;
1641 }
1642
1643 operand1 = operand2 = NULL;
1644 }
1645
1646 if (!dry_run) {
1647 /* We should have one item left on the stack */
1648 pred = __pop_pred_stack(&stack);
1649 if (!pred)
1650 return -EINVAL;
1651 /* This item is where we start from in matching */
1652 root = pred;
1653 /* Make sure the stack is empty */
1654 pred = __pop_pred_stack(&stack);
1655 if (WARN_ON(pred)) {
1656 err = -EINVAL;
1657 filter->root = NULL;
1658 goto fail;
1659 }
1660 err = check_pred_tree(filter, root);
1661 if (err)
1662 goto fail;
1663
1664 /* Optimize the tree */
1665 err = fold_pred_tree(filter, root);
1666 if (err)
1667 goto fail;
1668
1669 /* We don't set root until we know it works */
1670 barrier();
1671 filter->root = root;
1672 }
1673
1674 err = 0;
1675 fail:
1676 __free_pred_stack(&stack);
1677 return err;
1678 }
1679
/* Mark the event as filtered, on the call or the file as appropriate. */
static inline void event_set_filtered_flag(struct trace_event_file *file)
{
	struct trace_event_call *call = file->event_call;

	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
		call->flags |= TRACE_EVENT_FL_FILTERED;
	else
		file->flags |= EVENT_FILE_FL_FILTERED;
}
1689
/*
 * Publish @filter on the call or the file (depending on the filter
 * mode), with the store ordering guarantees of rcu_assign_pointer().
 */
static inline void event_set_filter(struct trace_event_file *file,
				    struct event_filter *filter)
{
	struct trace_event_call *call = file->event_call;

	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
		rcu_assign_pointer(call->filter, filter);
	else
		rcu_assign_pointer(file->filter, filter);
}
1700
/* Clear the published filter pointer (call or file, as appropriate). */
static inline void event_clear_filter(struct trace_event_file *file)
{
	struct trace_event_call *call = file->event_call;

	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
		RCU_INIT_POINTER(call->filter, NULL);
	else
		RCU_INIT_POINTER(file->filter, NULL);
}
1710
/* Flag the event as one the pending subsystem filter cannot apply to. */
static inline void
event_set_no_set_filter_flag(struct trace_event_file *file)
{
	struct trace_event_call *call = file->event_call;

	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
		call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
	else
		file->flags |= EVENT_FILE_FL_NO_SET_FILTER;
}
1721
/* Clear the "subsystem filter does not apply" flag for the event. */
static inline void
event_clear_no_set_filter_flag(struct trace_event_file *file)
{
	struct trace_event_call *call = file->event_call;

	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
		call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
	else
		file->flags &= ~EVENT_FILE_FL_NO_SET_FILTER;
}
1732
1733 static inline bool
1734 event_no_set_filter_flag(struct trace_event_file *file)
1735 {
1736 struct trace_event_call *call = file->event_call;
1737
1738 if (file->flags & EVENT_FILE_FL_NO_SET_FILTER)
1739 return true;
1740
1741 if ((call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) &&
1742 (call->flags & TRACE_EVENT_FL_NO_SET_FILTER))
1743 return true;
1744
1745 return false;
1746 }
1747
/* List node used by replace_system_preds() to collect filters to free. */
struct filter_list {
	struct list_head list;
	struct event_filter *filter;
};
1752
/*
 * Apply the parsed filter in @ps to every event of subsystem @dir in
 * trace array @tr.  A first dry-run pass marks the events the filter
 * cannot apply to; the second pass installs a freshly built filter on
 * each remaining event, collecting the replaced filters on a local
 * list that is only freed after synchronize_sched(), so concurrent
 * readers never see a freed filter.  Returns 0 if at least one event
 * took the filter, -EINVAL if none did, -ENOMEM on allocation failure.
 */
static int replace_system_preds(struct trace_subsystem_dir *dir,
				struct trace_array *tr,
				struct filter_parse_state *ps,
				char *filter_string)
{
	struct trace_event_file *file;
	struct filter_list *filter_item;
	struct filter_list *tmp;
	LIST_HEAD(filter_list);
	bool fail = true;
	int err;

	list_for_each_entry(file, &tr->events, list) {
		if (file->system != dir)
			continue;

		/*
		 * Try to see if the filter can be applied
		 * (filter arg is ignored on dry_run)
		 */
		err = replace_preds(file->event_call, NULL, ps, true);
		if (err)
			event_set_no_set_filter_flag(file);
		else
			event_clear_no_set_filter_flag(file);
	}

	list_for_each_entry(file, &tr->events, list) {
		struct event_filter *filter;

		if (file->system != dir)
			continue;

		/* Skip events the dry run rejected */
		if (event_no_set_filter_flag(file))
			continue;

		filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
		if (!filter_item)
			goto fail_mem;

		list_add_tail(&filter_item->list, &filter_list);

		filter_item->filter = __alloc_filter();
		if (!filter_item->filter)
			goto fail_mem;
		filter = filter_item->filter;

		/* Can only fail on no memory */
		err = replace_filter_string(filter, filter_string);
		if (err)
			goto fail_mem;

		err = replace_preds(file->event_call, filter, ps, false);
		if (err) {
			filter_disable(file);
			parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
			append_filter_err(ps, filter);
		} else
			event_set_filtered_flag(file);
		/*
		 * Regardless of if this returned an error, we still
		 * replace the filter for the call.
		 */
		filter = event_filter(file);
		event_set_filter(file, filter_item->filter);
		filter_item->filter = filter;

		fail = false;
	}

	if (fail)
		goto fail;

	/*
	 * The calls can still be using the old filters.
	 * Do a synchronize_sched() to ensure all calls are
	 * done with them before we free them.
	 */
	synchronize_sched();
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		__free_filter(filter_item->filter);
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	return 0;
fail:
	/* No call succeeded */
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
	return -EINVAL;
fail_mem:
	/* If any call succeeded, we still need to sync */
	if (!fail)
		synchronize_sched();
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		__free_filter(filter_item->filter);
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	return -ENOMEM;
}
1857
/*
 * Allocate a filter and parse state for @filter_str and run the
 * parser.  On allocation failure nothing is committed and -ENOMEM is
 * returned.  Otherwise *filterp and *psp are always committed — even
 * when filter_parse() fails — so the caller can retrieve the error
 * message (appended when @set_str) and must clean up via
 * create_filter_finish() and __free_filter().
 */
static int create_filter_start(char *filter_str, bool set_str,
			       struct filter_parse_state **psp,
			       struct event_filter **filterp)
{
	struct event_filter *filter;
	struct filter_parse_state *ps = NULL;
	int err = 0;

	WARN_ON_ONCE(*psp || *filterp);

	/* allocate everything, and if any fails, free all and fail */
	filter = __alloc_filter();
	if (filter && set_str)
		err = replace_filter_string(filter, filter_str);

	ps = kzalloc(sizeof(*ps), GFP_KERNEL);

	/* err here can only be replace_filter_string()'s -ENOMEM */
	if (!filter || !ps || err) {
		kfree(ps);
		__free_filter(filter);
		return -ENOMEM;
	}

	/* we're committed to creating a new filter */
	*filterp = filter;
	*psp = ps;

	parse_init(ps, filter_ops, filter_str);
	err = filter_parse(ps);
	if (err && set_str)
		append_filter_err(ps, filter);
	return err;
}
1891
/* Tear down the parse state allocated by create_filter_start(). */
static void create_filter_finish(struct filter_parse_state *ps)
{
	if (!ps)
		return;

	filter_opstack_clear(ps);
	postfix_clear(ps);
	kfree(ps);
}
1900
/**
 * create_filter - create a filter for a trace_event_call
 * @call: trace_event_call to create a filter for
 * @filter_str: filter string
 * @set_str: remember @filter_str and enable detailed error in filter
 * @filterp: out param for created filter (always updated on return)
 *
 * Creates a filter for @call with @filter_str.  If @set_str is %true,
 * @filter_str is copied and recorded in the new filter.
 *
 * On success, returns 0 and *@filterp points to the new filter.  On
 * failure, returns -errno and *@filterp may point to %NULL or to a new
 * filter.  In the latter case, the returned filter contains error
 * information if @set_str is %true and the caller is responsible for
 * freeing it.
 */
static int create_filter(struct trace_event_call *call,
			 char *filter_str, bool set_str,
			 struct event_filter **filterp)
{
	struct event_filter *filter = NULL;
	struct filter_parse_state *ps = NULL;
	int err;

	err = create_filter_start(filter_str, set_str, &ps, &filter);
	if (!err) {
		/* Parsing succeeded; build the pred tree for @call */
		err = replace_preds(call, filter, ps, false);
		if (err && set_str)
			append_filter_err(ps, filter);
	}
	create_filter_finish(ps);

	*filterp = filter;
	return err;
}
1936
/* Public wrapper around create_filter() for code outside this file. */
int create_event_filter(struct trace_event_call *call,
			char *filter_str, bool set_str,
			struct event_filter **filterp)
{
	return create_filter(call, filter_str, set_str, filterp);
}
1943
/**
 * create_system_filter - create a filter for an event_subsystem
 * @dir: the directory descriptor of the subsystem to filter
 * @tr: the trace array the subsystem's events belong to
 * @filter_str: filter string
 * @filterp: out param for created filter (always updated on return)
 *
 * Identical to create_filter() except that it applies the filter to
 * every event of the subsystem and always remembers @filter_str.
 */
static int create_system_filter(struct trace_subsystem_dir *dir,
				struct trace_array *tr,
				char *filter_str, struct event_filter **filterp)
{
	struct event_filter *filter = NULL;
	struct filter_parse_state *ps = NULL;
	int err;

	err = create_filter_start(filter_str, true, &ps, &filter);
	if (!err) {
		err = replace_system_preds(dir, tr, ps, filter_str);
		if (!err) {
			/* System filters just show a default message */
			kfree(filter->filter_string);
			filter->filter_string = NULL;
		} else {
			append_filter_err(ps, filter);
		}
	}
	create_filter_finish(ps);

	*filterp = filter;
	return err;
}
1977
/*
 * Set or clear the filter of one event file from @filter_string.
 * The string "0" removes the current filter.  The old filter is only
 * freed after synchronize_sched(), so concurrent readers are safe.
 * Caller must hold event_mutex.
 */
int apply_event_filter(struct trace_event_file *file, char *filter_string)
{
	struct trace_event_call *call = file->event_call;
	struct event_filter *filter;
	int err;

	/* "0" means: drop any existing filter */
	if (!strcmp(strstrip(filter_string), "0")) {
		filter_disable(file);
		filter = event_filter(file);

		if (!filter)
			return 0;

		event_clear_filter(file);

		/* Make sure the filter is not being used */
		synchronize_sched();
		__free_filter(filter);

		return 0;
	}

	err = create_filter(call, filter_string, true, &filter);

	/*
	 * Always swap the call filter with the new filter
	 * even if there was an error. If there was an error
	 * in the filter, we disable the filter and show the error
	 * string
	 */
	if (filter) {
		struct event_filter *tmp;

		tmp = event_filter(file);
		if (!err)
			event_set_filtered_flag(file);
		else
			filter_disable(file);

		event_set_filter(file, filter);

		if (tmp) {
			/* Make sure the call is done with the filter */
			synchronize_sched();
			__free_filter(tmp);
		}
	}

	return err;
}
2029
/*
 * Set or clear the filter of a whole subsystem from @filter_string.
 * The string "0" removes all filters of the subsystem's events.
 * Takes event_mutex itself.
 */
int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
				 char *filter_string)
{
	struct event_subsystem *system = dir->subsystem;
	struct trace_array *tr = dir->tr;
	struct event_filter *filter;
	int err = 0;

	mutex_lock(&event_mutex);

	/* Make sure the system still has events */
	if (!dir->nr_events) {
		err = -ENODEV;
		goto out_unlock;
	}

	if (!strcmp(strstrip(filter_string), "0")) {
		filter_free_subsystem_preds(dir, tr);
		remove_filter_string(system->filter);
		filter = system->filter;
		system->filter = NULL;
		/* Ensure all filters are no longer used */
		synchronize_sched();
		filter_free_subsystem_filters(dir, tr);
		__free_filter(filter);
		goto out_unlock;
	}

	err = create_system_filter(dir, tr, filter_string, &filter);
	if (filter) {
		/*
		 * No event actually uses the system filter
		 * we can free it without synchronize_sched().
		 */
		__free_filter(system->filter);
		system->filter = filter;
	}
out_unlock:
	mutex_unlock(&event_mutex);

	return err;
}
2072
2073 #ifdef CONFIG_PERF_EVENTS
2074
/* Detach and free the event_filter attached to a perf event. */
void ftrace_profile_free_filter(struct perf_event *event)
{
	struct event_filter *filter = event->filter;

	/* Clear the pointer before freeing its target */
	event->filter = NULL;
	__free_filter(filter);
}
2082
/* Walk state for translating a pred tree into ftrace filter calls. */
struct function_filter_data {
	struct ftrace_ops *ops;	/* ops whose filter/notrace lists to set */
	int first_filter;	/* reset the filter list on first use */
	int first_notrace;	/* reset the notrace list on first use */
};
2088
2089 #ifdef CONFIG_FUNCTION_TRACER
2090 static char **
2091 ftrace_function_filter_re(char *buf, int len, int *count)
2092 {
2093 char *str, **re;
2094
2095 str = kstrndup(buf, len, GFP_KERNEL);
2096 if (!str)
2097 return NULL;
2098
2099 /*
2100 * The argv_split function takes white space
2101 * as a separator, so convert ',' into spaces.
2102 */
2103 strreplace(str, ',', ' ');
2104
2105 re = argv_split(GFP_KERNEL, str, count);
2106 kfree(str);
2107 return re;
2108 }
2109
/* Route @re to the ftrace filter list or the notrace list of @ops. */
static int ftrace_function_set_regexp(struct ftrace_ops *ops, int filter,
				      int reset, char *re, int len)
{
	return filter ? ftrace_set_filter(ops, re, len, reset)
		      : ftrace_set_notrace(ops, re, len, reset);
}
2122
/*
 * Apply one leaf pred's value (@buf, possibly holding several
 * comma/space separated regexps) to the filter or notrace list,
 * resetting the list only for the first regexp applied.
 */
static int __ftrace_function_set_filter(int filter, char *buf, int len,
					struct function_filter_data *data)
{
	int i, re_cnt, ret = -EINVAL;
	int *reset;
	char **re;

	reset = filter ? &data->first_filter : &data->first_notrace;

	/*
	 * The 'ip' field could have multiple filters set, separated
	 * either by space or comma. We first cut the filter and apply
	 * all pieces separately.
	 */
	re = ftrace_function_filter_re(buf, len, &re_cnt);
	if (!re)
		return -EINVAL;

	for (i = 0; i < re_cnt; i++) {
		ret = ftrace_function_set_regexp(data->ops, filter, *reset,
						 re[i], strlen(re[i]));
		if (ret)
			break;

		/* Only the first regexp resets the list */
		if (*reset)
			*reset = 0;
	}

	argv_free(re);
	return ret;
}
2154
2155 static int ftrace_function_check_pred(struct filter_pred *pred, int leaf)
2156 {
2157 struct ftrace_event_field *field = pred->field;
2158
2159 if (leaf) {
2160 /*
2161 * Check the leaf predicate for function trace, verify:
2162 * - only '==' and '!=' is used
2163 * - the 'ip' field is used
2164 */
2165 if ((pred->op != OP_EQ) && (pred->op != OP_NE))
2166 return -EINVAL;
2167
2168 if (strcmp(field->name, "ip"))
2169 return -EINVAL;
2170 } else {
2171 /*
2172 * Check the non leaf predicate for function trace, verify:
2173 * - only '||' is used
2174 */
2175 if (pred->op != OP_OR)
2176 return -EINVAL;
2177 }
2178
2179 return 0;
2180 }
2181
/*
 * Walk callback: validate each node for function tracing and, for
 * every leaf reached on the way down, install its regexp on the
 * filter ('==') or notrace ('!=') list.  Aborts on any error.
 */
static int ftrace_function_set_filter_cb(enum move_type move,
					 struct filter_pred *pred,
					 int *err, void *data)
{
	/* Checking the node is valid for function trace. */
	if ((move != MOVE_DOWN) ||
	    (pred->left != FILTER_PRED_INVALID)) {
		*err = ftrace_function_check_pred(pred, 0);
	} else {
		*err = ftrace_function_check_pred(pred, 1);
		if (*err)
			return WALK_PRED_ABORT;

		*err = __ftrace_function_set_filter(pred->op == OP_EQ,
						    pred->regex.pattern,
						    pred->regex.len,
						    data);
	}

	return (*err) ? WALK_PRED_ABORT : WALK_PRED_DEFAULT;
}
2203
2204 static int ftrace_function_set_filter(struct perf_event *event,
2205 struct event_filter *filter)
2206 {
2207 struct function_filter_data data = {
2208 .first_filter = 1,
2209 .first_notrace = 1,
2210 .ops = &event->ftrace_ops,
2211 };
2212
2213 return walk_pred_tree(filter->preds, filter->root,
2214 ftrace_function_set_filter_cb, &data);
2215 }
2216 #else
/* Stub: function tracer not built in, function filters unavailable. */
static int ftrace_function_set_filter(struct perf_event *event,
				      struct event_filter *filter)
{
	return -ENODEV;
}
2222 #endif /* CONFIG_FUNCTION_TRACER */
2223
/*
 * Attach a filter to a perf event.  For function events the filter
 * is translated into ftrace filter/notrace lists and the
 * event_filter itself is freed again; other events keep it on
 * event->filter.  Fails with -EEXIST if a filter is already set.
 */
int ftrace_profile_set_filter(struct perf_event *event, int event_id,
			      char *filter_str)
{
	int err;
	struct event_filter *filter;
	struct trace_event_call *call;

	mutex_lock(&event_mutex);

	call = event->tp_event;

	err = -EINVAL;
	if (!call)
		goto out_unlock;

	err = -EEXIST;
	if (event->filter)
		goto out_unlock;

	err = create_filter(call, filter_str, false, &filter);
	if (err)
		goto free_filter;

	if (ftrace_event_is_function(call))
		err = ftrace_function_set_filter(event, filter);
	else
		event->filter = filter;

free_filter:
	/* On error, or once translated for function events, drop it */
	if (err || ftrace_event_is_function(call))
		__free_filter(filter);

out_unlock:
	mutex_unlock(&event_mutex);

	return err;
}
2261
2262 #endif /* CONFIG_PERF_EVENTS */
2263
2264 #ifdef CONFIG_FTRACE_STARTUP_TEST
2265
2266 #include <linux/types.h>
2267 #include <linux/tracepoint.h>
2268
2269 #define CREATE_TRACE_POINTS
2270 #include "trace_events_filter_test.h"
2271
/*
 * DATA_REC - build one self-test record: the filter under test, the
 * event field values, the expected match result, and the fields whose
 * preds must NOT be visited (i.e. short-circuited) during matching.
 */
#define DATA_REC(m, va, vb, vc, vd, ve, vf, vg, vh, nvisit) \
{ \
	.filter = FILTER, \
	.rec    = { .a = va, .b = vb, .c = vc, .d = vd, \
		    .e = ve, .f = vf, .g = vg, .h = vh }, \
	.match  = m, \
	.not_visited = nvisit, \
}
/* Expected match results */
#define YES 1
#define NO 0
2282
/*
 * Self-test table: each record pairs a filter string with field
 * values, the expected match result, and the fields whose preds must
 * not run (proves short-circuit evaluation of the folded tree).
 */
static struct test_filter_data_t {
	char *filter;
	struct trace_event_raw_ftrace_test_filter rec;
	int match;
	char *not_visited;
} test_filter_data[] = {
#define FILTER "a == 1 && b == 1 && c == 1 && d == 1 && " \
	       "e == 1 && f == 1 && g == 1 && h == 1"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, ""),
	DATA_REC(NO, 0, 1, 1, 1, 1, 1, 1, 1, "bcdefgh"),
	DATA_REC(NO, 1, 1, 1, 1, 1, 1, 1, 0, ""),
#undef FILTER
#define FILTER "a == 1 || b == 1 || c == 1 || d == 1 || " \
	       "e == 1 || f == 1 || g == 1 || h == 1"
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""),
	DATA_REC(YES, 0, 0, 0, 0, 0, 0, 0, 1, ""),
	DATA_REC(YES, 1, 0, 0, 0, 0, 0, 0, 0, "bcdefgh"),
#undef FILTER
#define FILTER "(a == 1 || b == 1) && (c == 1 || d == 1) && " \
	       "(e == 1 || f == 1) && (g == 1 || h == 1)"
	DATA_REC(NO, 0, 0, 1, 1, 1, 1, 1, 1, "dfh"),
	DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(YES, 1, 0, 1, 0, 0, 1, 0, 1, "bd"),
	DATA_REC(NO, 1, 0, 1, 0, 0, 1, 0, 0, "bd"),
#undef FILTER
#define FILTER "(a == 1 && b == 1) || (c == 1 && d == 1) || " \
	       "(e == 1 && f == 1) || (g == 1 && h == 1)"
	DATA_REC(YES, 1, 0, 1, 1, 1, 1, 1, 1, "efgh"),
	DATA_REC(YES, 0, 0, 0, 0, 0, 0, 1, 1, ""),
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""),
#undef FILTER
#define FILTER "(a == 1 && b == 1) && (c == 1 && d == 1) && " \
	       "(e == 1 && f == 1) || (g == 1 && h == 1)"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 0, "gh"),
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""),
	DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, ""),
#undef FILTER
#define FILTER "((a == 1 || b == 1) || (c == 1 || d == 1) || " \
	       "(e == 1 || f == 1)) && (g == 1 || h == 1)"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 1, "bcdef"),
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""),
	DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, "h"),
#undef FILTER
#define FILTER "((((((((a == 1) && (b == 1)) || (c == 1)) && (d == 1)) || " \
	       "(e == 1)) && (f == 1)) || (g == 1)) && (h == 1))"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "ceg"),
	DATA_REC(NO, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(NO, 1, 0, 1, 0, 1, 0, 1, 0, ""),
#undef FILTER
#define FILTER "((((((((a == 1) || (b == 1)) && (c == 1)) || (d == 1)) && " \
	       "(e == 1)) || (f == 1)) && (g == 1)) || (h == 1))"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "bdfh"),
	DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(YES, 1, 0, 1, 0, 1, 0, 1, 0, "bdfh"),
};
2338
#undef DATA_REC
#undef FILTER
#undef YES
#undef NO

/* Number of self-test records in test_filter_data[] */
#define DATA_CNT (sizeof(test_filter_data)/sizeof(struct test_filter_data_t))

/* Set by test_pred_visited_fn() when a should-be-skipped pred runs */
static int test_pred_visited;
2347
2348 static int test_pred_visited_fn(struct filter_pred *pred, void *event)
2349 {
2350 struct ftrace_event_field *field = pred->field;
2351
2352 test_pred_visited = 1;
2353 printk(KERN_INFO "\npred visited %s\n", field->name);
2354 return 1;
2355 }
2356
/*
 * Walk callback: for every leaf whose field is listed in the
 * not_visited string (passed via @data), swap in a tattling pred
 * function so an unwanted visit during matching is detected.
 */
static int test_walk_pred_cb(enum move_type move, struct filter_pred *pred,
			     int *err, void *data)
{
	char *fields = data;

	if ((move == MOVE_DOWN) &&
	    (pred->left == FILTER_PRED_INVALID)) {
		struct ftrace_event_field *field = pred->field;

		if (!field) {
			WARN(1, "all leafs should have field defined");
			return WALK_PRED_DEFAULT;
		}
		/* Only instrument fields named in the not_visited set */
		if (!strchr(fields, *field->name))
			return WALK_PRED_DEFAULT;

		WARN_ON(!pred->fn);
		pred->fn = test_pred_visited_fn;
	}
	return WALK_PRED_DEFAULT;
}
2378
/*
 * Boot-time self test: for each record, build the filter, instrument
 * the preds that must be short-circuited, run the match and verify
 * both the result and that no instrumented pred was visited.
 */
static __init int ftrace_test_event_filter(void)
{
	int i;

	printk(KERN_INFO "Testing ftrace filter: ");

	for (i = 0; i < DATA_CNT; i++) {
		struct event_filter *filter = NULL;
		struct test_filter_data_t *d = &test_filter_data[i];
		int err;

		err = create_filter(&event_ftrace_test_filter, d->filter,
				    false, &filter);
		if (err) {
			printk(KERN_INFO
			       "Failed to get filter for '%s', err %d\n",
			       d->filter, err);
			__free_filter(filter);
			break;
		}

		/*
		 * The preemption disabling is not really needed for self
		 * tests, but the rcu dereference will complain without it.
		 */
		preempt_disable();
		if (*d->not_visited)
			walk_pred_tree(filter->preds, filter->root,
				       test_walk_pred_cb,
				       d->not_visited);

		test_pred_visited = 0;
		err = filter_match_preds(filter, &d->rec);
		preempt_enable();

		__free_filter(filter);

		if (test_pred_visited) {
			printk(KERN_INFO
			       "Failed, unwanted pred visited for filter %s\n",
			       d->filter);
			break;
		}

		if (err != d->match) {
			printk(KERN_INFO
			       "Failed to match filter '%s', expected %d\n",
			       d->filter, d->match);
			break;
		}
	}

	/* Only print OK if every record passed (loop ran to completion) */
	if (i == DATA_CNT)
		printk(KERN_CONT "OK\n");

	return 0;
}
2436
2437 late_initcall(ftrace_test_event_filter);
2438
2439 #endif /* CONFIG_FTRACE_STARTUP_TEST */
This page took 0.107675 seconds and 6 git commands to generate.