Merge branch 'stable-4.5' of git://git.infradead.org/users/pcmoore/selinux into for...
[deliverable/linux.git] / kernel / trace / trace_events_filter.c
1 /*
2 * trace_events_filter - generic event filtering
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
19 */
20
21 #include <linux/module.h>
22 #include <linux/ctype.h>
23 #include <linux/mutex.h>
24 #include <linux/perf_event.h>
25 #include <linux/slab.h>
26
27 #include "trace.h"
28 #include "trace_output.h"
29
/*
 * Text shown when a subsystem's "filter" file is read while no
 * subsystem-wide filter is set (see print_subsystem_event_filter()).
 */
#define DEFAULT_SYS_FILTER_MESSAGE					\
	"### global filter ###\n"					\
	"# Use this to set filters for multiple events.\n"		\
	"# Only events with the given fields will be affected.\n"	\
	"# If no events are modified, an error message will be displayed here"
35
/*
 * Operator identifiers used by the filter parser.  The order must match
 * the filter_ops[] table below, which is indexed by these values.
 */
enum filter_op_ids
{
	OP_OR,
	OP_AND,
	OP_GLOB,
	OP_NE,
	OP_EQ,
	OP_LT,
	OP_LE,
	OP_GT,
	OP_GE,
	OP_BAND,
	OP_NOT,
	OP_NONE,	/* sentinel: "no operator" / table terminator */
	OP_OPEN_PAREN,
};

/* One parser operator: its id, source spelling and binding strength. */
struct filter_op {
	int id;
	char *string;
	int precedence;	/* higher binds tighter; 0 for non-infix entries */
};

/* Order must be the same as enum filter_op_ids above */
static struct filter_op filter_ops[] = {
	{ OP_OR,	"||",		1 },
	{ OP_AND,	"&&",		2 },
	{ OP_GLOB,	"~",		4 },
	{ OP_NE,	"!=",		4 },
	{ OP_EQ,	"==",		4 },
	{ OP_LT,	"<",		5 },
	{ OP_LE,	"<=",		5 },
	{ OP_GT,	">",		5 },
	{ OP_GE,	">=",		5 },
	{ OP_BAND,	"&",		6 },
	{ OP_NOT,	"!",		6 },
	{ OP_NONE,	"OP_NONE",	0 },
	{ OP_OPEN_PAREN, "(",		0 },
};
75
/* Parse-error codes; used as indexes into err_text[] below. */
enum {
	FILT_ERR_NONE,
	FILT_ERR_INVALID_OP,
	FILT_ERR_UNBALANCED_PAREN,
	FILT_ERR_TOO_MANY_OPERANDS,
	FILT_ERR_OPERAND_TOO_LONG,
	FILT_ERR_FIELD_NOT_FOUND,
	FILT_ERR_ILLEGAL_FIELD_OP,
	FILT_ERR_ILLEGAL_INTVAL,
	FILT_ERR_BAD_SUBSYS_FILTER,
	FILT_ERR_TOO_MANY_PREDS,
	FILT_ERR_MISSING_FIELD,
	FILT_ERR_INVALID_FILTER,
	FILT_ERR_IP_FIELD_ONLY,
	FILT_ERR_ILLEGAL_NOT_OP,
};

/* Human-readable messages, indexed by the FILT_ERR_* codes above. */
static char *err_text[] = {
	"No error",
	"Invalid operator",
	"Unbalanced parens",
	"Too many operands",
	"Operand too long",
	"Field not found",
	"Illegal operation for field type",
	"Illegal integer value",
	"Couldn't find or set field in one of a subsystem's events",
	"Too many terms in predicate expression",
	"Missing field name and/or value",
	"Meaningless filter expression",
	"Only 'ip' field is supported for function trace",
	"Illegal use of '!'",
};
109
/* Node on the operator stack used during infix->postfix conversion. */
struct opstack_op {
	int op;
	struct list_head list;
};

/* One element of the postfix (RPN) expression: an operator OR an operand. */
struct postfix_elt {
	int op;			/* OP_NONE when this element is an operand */
	char *operand;		/* kstrdup()ed operand string, NULL for operators */
	struct list_head list;
};

/* Transient state used while parsing one filter string. */
struct filter_parse_state {
	struct filter_op *ops;		/* operator table (filter_ops) */
	struct list_head opstack;	/* shunting-yard operator stack */
	struct list_head postfix;	/* resulting postfix expression */
	int lasterr;			/* last FILT_ERR_* code recorded */
	int lasterr_pos;		/* position of the error in the input */

	struct {
		char *string;		/* raw infix filter string */
		unsigned int cnt;	/* characters still to consume */
		unsigned int tail;	/* index of next character to read */
	} infix;

	struct {
		char string[MAX_FILTER_STR_VAL];	/* operand accumulator */
		int pos;
		unsigned int tail;	/* next free slot in string[] */
	} operand;
};

/* Stack of predicate pointers used while building the predicate tree. */
struct pred_stack {
	struct filter_pred **preds;
	int index;	/* grows downward; 0 means the stack is full */
};
145
/* If not of not match is equal to not of not, then it is a match */
/*
 * Generate filter_pred_<type>() relational predicates (<, <=, >, >=, &)
 * for each integer type.  @event points at the binary record; the field
 * lives at pred->offset within it.  Returns 1 on match, 0 otherwise,
 * honouring the pred->not inversion flag.
 */
#define DEFINE_COMPARISON_PRED(type)					\
static int filter_pred_##type(struct filter_pred *pred, void *event)	\
{									\
	type *addr = (type *)(event + pred->offset);			\
	type val = (type)pred->val;					\
	int match = 0;							\
									\
	switch (pred->op) {						\
	case OP_LT:							\
		match = (*addr < val);					\
		break;							\
	case OP_LE:							\
		match = (*addr <= val);					\
		break;							\
	case OP_GT:							\
		match = (*addr > val);					\
		break;							\
	case OP_GE:							\
		match = (*addr >= val);					\
		break;							\
	case OP_BAND:							\
		match = (*addr & val);					\
		break;							\
	default:							\
		break;							\
	}								\
									\
	return !!match == !pred->not;					\
}

/*
 * Generate filter_pred_<size>() equality predicates for each integer
 * width.  OP_NE is handled by pred->not being set (see init_pred()).
 */
#define DEFINE_EQUALITY_PRED(size)					\
static int filter_pred_##size(struct filter_pred *pred, void *event)	\
{									\
	u##size *addr = (u##size *)(event + pred->offset);		\
	u##size val = (u##size)pred->val;				\
	int match;							\
									\
	match = (val == *addr) ^ pred->not;				\
									\
	return match;							\
}

DEFINE_COMPARISON_PRED(s64);
DEFINE_COMPARISON_PRED(u64);
DEFINE_COMPARISON_PRED(s32);
DEFINE_COMPARISON_PRED(u32);
DEFINE_COMPARISON_PRED(s16);
DEFINE_COMPARISON_PRED(u16);
DEFINE_COMPARISON_PRED(s8);
DEFINE_COMPARISON_PRED(u8);

DEFINE_EQUALITY_PRED(64);
DEFINE_EQUALITY_PRED(32);
DEFINE_EQUALITY_PRED(16);
DEFINE_EQUALITY_PRED(8);
202
203 /* Filter predicate for fixed sized arrays of characters */
204 static int filter_pred_string(struct filter_pred *pred, void *event)
205 {
206 char *addr = (char *)(event + pred->offset);
207 int cmp, match;
208
209 cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len);
210
211 match = cmp ^ pred->not;
212
213 return match;
214 }
215
/* Filter predicate for char * pointers */
static int filter_pred_pchar(struct filter_pred *pred, void *event)
{
	/* The event field holds a pointer to the actual string. */
	char **addr = (char **)(event + pred->offset);
	int cmp, match;
	/*
	 * NOTE(review): assumes *addr is non-NULL and NUL-terminated --
	 * events using FILTER_PTR_STRING fields must guarantee that.
	 */
	int len = strlen(*addr) + 1;	/* including tailing '\0' */

	cmp = pred->regex.match(*addr, &pred->regex, len);

	match = cmp ^ pred->not;

	return match;
}
229
/*
 * Filter predicate for dynamic sized arrays of characters.
 * These are implemented through a list of strings at the end
 * of the entry.
 * Also each of these strings have a field in the entry which
 * contains its offset from the beginning of the entry.
 * We have then first to get this field, dereference it
 * and add it to the address of the entry, and at last we have
 * the address of the string.
 */
static int filter_pred_strloc(struct filter_pred *pred, void *event)
{
	/* __data_loc word: low 16 bits = offset, high 16 bits = length */
	u32 str_item = *(u32 *)(event + pred->offset);
	int str_loc = str_item & 0xffff;
	int str_len = str_item >> 16;
	char *addr = (char *)(event + str_loc);
	int cmp, match;

	cmp = pred->regex.match(addr, &pred->regex, str_len);

	match = cmp ^ pred->not;

	return match;
}
254
255 /* Filter predicate for CPUs. */
256 static int filter_pred_cpu(struct filter_pred *pred, void *event)
257 {
258 int cpu, cmp;
259 int match = 0;
260
261 cpu = raw_smp_processor_id();
262 cmp = pred->val;
263
264 switch (pred->op) {
265 case OP_EQ:
266 match = cpu == cmp;
267 break;
268 case OP_LT:
269 match = cpu < cmp;
270 break;
271 case OP_LE:
272 match = cpu <= cmp;
273 break;
274 case OP_GT:
275 match = cpu > cmp;
276 break;
277 case OP_GE:
278 match = cpu >= cmp;
279 break;
280 default:
281 break;
282 }
283
284 return !!match == !pred->not;
285 }
286
287 /* Filter predicate for COMM. */
288 static int filter_pred_comm(struct filter_pred *pred, void *event)
289 {
290 int cmp, match;
291
292 cmp = pred->regex.match(current->comm, &pred->regex,
293 pred->regex.field_len);
294 match = cmp ^ pred->not;
295
296 return match;
297 }
298
299 static int filter_pred_none(struct filter_pred *pred, void *event)
300 {
301 return 0;
302 }
303
304 /*
305 * regex_match_foo - Basic regex callbacks
306 *
307 * @str: the string to be searched
308 * @r: the regex structure containing the pattern string
309 * @len: the length of the string to be searched (including '\0')
310 *
311 * Note:
312 * - @str might not be NULL-terminated if it's of type DYN_STRING
313 * or STATIC_STRING
314 */
315
316 static int regex_match_full(char *str, struct regex *r, int len)
317 {
318 if (strncmp(str, r->pattern, len) == 0)
319 return 1;
320 return 0;
321 }
322
323 static int regex_match_front(char *str, struct regex *r, int len)
324 {
325 if (strncmp(str, r->pattern, r->len) == 0)
326 return 1;
327 return 0;
328 }
329
330 static int regex_match_middle(char *str, struct regex *r, int len)
331 {
332 if (strnstr(str, r->pattern, len))
333 return 1;
334 return 0;
335 }
336
337 static int regex_match_end(char *str, struct regex *r, int len)
338 {
339 int strlen = len - 1;
340
341 if (strlen >= r->len &&
342 memcmp(str + strlen - r->len, r->pattern, r->len) == 0)
343 return 1;
344 return 0;
345 }
346
/**
 * filter_parse_regex - parse a basic regex
 * @buff: the raw regex
 * @len: length of the regex
 * @search: will point to the beginning of the string to compare
 * @not: tell whether the match will have to be inverted
 *
 * This passes in a buffer containing a regex and this function will
 * set search to point to the search part of the buffer and
 * return the type of search it is (see enum above).
 * This does modify buff.
 *
 * Returns enum type.
 *  search returns the pointer to use for comparison.
 *  not returns 1 if buff started with a '!'
 *     0 otherwise.
 */
enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
{
	int type = MATCH_FULL;
	int i;

	/* A leading '!' inverts the match and is not part of the pattern. */
	if (buff[0] == '!') {
		*not = 1;
		buff++;
		len--;
	} else
		*not = 0;

	*search = buff;

	/* Classify by the position of the first '*' wildcard. */
	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				/* Leading '*': match the end of the string. */
				*search = buff + 1;
				type = MATCH_END_ONLY;
			} else {
				/*
				 * A later '*': "*foo*" becomes a middle
				 * match, "foo*" a front match.  The '*' is
				 * overwritten with NUL to terminate the
				 * comparison string; anything after it is
				 * ignored.
				 */
				if (type == MATCH_END_ONLY)
					type = MATCH_MIDDLE_ONLY;
				else
					type = MATCH_FRONT_ONLY;
				buff[i] = 0;
				break;
			}
		}
	}

	return type;
}
396
397 static void filter_build_regex(struct filter_pred *pred)
398 {
399 struct regex *r = &pred->regex;
400 char *search;
401 enum regex_type type = MATCH_FULL;
402 int not = 0;
403
404 if (pred->op == OP_GLOB) {
405 type = filter_parse_regex(r->pattern, r->len, &search, &not);
406 r->len = strlen(search);
407 memmove(r->pattern, search, r->len+1);
408 }
409
410 switch (type) {
411 case MATCH_FULL:
412 r->match = regex_match_full;
413 break;
414 case MATCH_FRONT_ONLY:
415 r->match = regex_match_front;
416 break;
417 case MATCH_MIDDLE_ONLY:
418 r->match = regex_match_middle;
419 break;
420 case MATCH_END_ONLY:
421 r->match = regex_match_end;
422 break;
423 }
424
425 pred->not ^= not;
426 }
427
/* Direction of traversal while walking the binary predicate tree. */
enum move_type {
	MOVE_DOWN,		/* descending into a child */
	MOVE_UP_FROM_LEFT,	/* finished the left subtree */
	MOVE_UP_FROM_RIGHT	/* finished the right subtree */
};
433
434 static struct filter_pred *
435 get_pred_parent(struct filter_pred *pred, struct filter_pred *preds,
436 int index, enum move_type *move)
437 {
438 if (pred->parent & FILTER_PRED_IS_RIGHT)
439 *move = MOVE_UP_FROM_RIGHT;
440 else
441 *move = MOVE_UP_FROM_LEFT;
442 pred = &preds[pred->parent & ~FILTER_PRED_IS_RIGHT];
443
444 return pred;
445 }
446
/* Return codes for walk_pred_tree() callbacks. */
enum walk_return {
	WALK_PRED_ABORT,	/* stop the walk; return the callback's *err */
	WALK_PRED_PARENT,	/* skip this subtree; move up to the parent */
	WALK_PRED_DEFAULT,	/* continue the normal traversal */
};

/* Callback invoked on every node visit during walk_pred_tree(). */
typedef int (*filter_pred_walkcb_t) (enum move_type move,
				     struct filter_pred *pred,
				     int *err, void *data);
456
/*
 * Iteratively walk the predicate tree rooted at @root, invoking @cb on
 * every visit of every node (going down, and coming back up from each
 * side).  Returns 0 after a complete walk, the callback's *err when the
 * callback aborts, or -EINVAL when @preds is NULL.
 */
static int walk_pred_tree(struct filter_pred *preds,
			  struct filter_pred *root,
			  filter_pred_walkcb_t cb, void *data)
{
	struct filter_pred *pred = root;
	enum move_type move = MOVE_DOWN;
	int done = 0;

	if  (!preds)
		return -EINVAL;

	do {
		int err = 0, ret;

		ret = cb(move, pred, &err, data);
		if (ret == WALK_PRED_ABORT)
			return err;
		if (ret == WALK_PRED_PARENT)
			goto get_parent;

		switch (move) {
		case MOVE_DOWN:
			/* Leaves are marked by left == FILTER_PRED_INVALID. */
			if (pred->left != FILTER_PRED_INVALID) {
				pred = &preds[pred->left];
				continue;
			}
			goto get_parent;
		case MOVE_UP_FROM_LEFT:
			/* Left side done; descend into the right child. */
			pred = &preds[pred->right];
			move = MOVE_DOWN;
			continue;
		case MOVE_UP_FROM_RIGHT:
 get_parent:
			/* Coming back up at the root means the walk is over. */
			if (pred == root)
				break;
			pred = get_pred_parent(pred, preds,
					       pred->parent,
					       &move);
			continue;
		}
		done = 1;
	} while (!done);

	/* We are fine. */
	return 0;
}
503
/*
 * A series of AND or ORs where found together. Instead of
 * climbing up and down the tree branches, an array of the
 * ops were made in order of checks. We can just move across
 * the array and short circuit if needed.
 */
static int process_ops(struct filter_pred *preds,
		       struct filter_pred *op, void *rec)
{
	struct filter_pred *pred;
	int match = 0;
	int type;
	int i;

	/*
	 * Micro-optimization: We set type to true if op
	 * is an OR and false otherwise (AND). Then we
	 * just need to test if the match is equal to
	 * the type, and if it is, we can short circuit the
	 * rest of the checks:
	 *
	 * if ((match && op->op == OP_OR) ||
	 *     (!match && op->op == OP_AND))
	 *	  return match;
	 */
	type = op->op == OP_OR;

	/* For folded nodes, op->val is the count of children in op->ops[]. */
	for (i = 0; i < op->val; i++) {
		pred = &preds[op->ops[i]];
		if (!WARN_ON_ONCE(!pred->fn))
			match = pred->fn(pred, rec);
		if (!!match == type)
			break;
	}
	/* If not of not match is equal to not of not, then it is a match */
	return !!match == !op->not;
}
541
/* Per-walk context for filter_match_preds_cb(). */
struct filter_match_preds_data {
	struct filter_pred *preds;	/* flat predicate array */
	int match;			/* running match result */
	void *rec;			/* binary event record being tested */
};

/*
 * walk_pred_tree() callback: evaluate each node against d->rec, short
 * circuiting whole subtrees once an AND/OR result is decided.  Folded
 * AND/OR nodes are evaluated in one pass by process_ops().
 */
static int filter_match_preds_cb(enum move_type move, struct filter_pred *pred,
				 int *err, void *data)
{
	struct filter_match_preds_data *d = data;

	*err = 0;
	switch (move) {
	case MOVE_DOWN:
		/* only AND and OR have children */
		if (pred->left != FILTER_PRED_INVALID) {
			/* If ops is set, then it was folded. */
			if (!pred->ops)
				return WALK_PRED_DEFAULT;
			/* We can treat folded ops as a leaf node */
			d->match = process_ops(d->preds, pred, d->rec);
		} else {
			if (!WARN_ON_ONCE(!pred->fn))
				d->match = pred->fn(pred, d->rec);
		}

		return WALK_PRED_PARENT;
	case MOVE_UP_FROM_LEFT:
		/*
		 * Check for short circuits.
		 *
		 * Optimization: !!match == (pred->op == OP_OR)
		 *   is the same as:
		 * if ((match && pred->op == OP_OR) ||
		 *     (!match && pred->op == OP_AND))
		 */
		if (!!d->match == (pred->op == OP_OR))
			return WALK_PRED_PARENT;
		break;
	case MOVE_UP_FROM_RIGHT:
		break;
	}

	return WALK_PRED_DEFAULT;
}
587
/* return 1 if event matches, 0 otherwise (discard) */
int filter_match_preds(struct event_filter *filter, void *rec)
{
	struct filter_pred *preds;
	struct filter_pred *root;
	struct filter_match_preds_data data = {
		/* match is currently meaningless */
		.match = -1,
		.rec   = rec,
	};
	int n_preds, ret;

	/* no filter is considered a match */
	if (!filter)
		return 1;

	n_preds = filter->n_preds;
	if (!n_preds)
		return 1;

	/*
	 * n_preds, root and filter->preds are protect with preemption disabled.
	 */
	root = rcu_dereference_sched(filter->root);
	if (!root)
		return 1;

	data.preds = preds = rcu_dereference_sched(filter->preds);
	ret = walk_pred_tree(preds, root, filter_match_preds_cb, &data);
	/* The walk can only fail if preds is NULL, which we just loaded. */
	WARN_ON(ret);
	return data.match;
}
EXPORT_SYMBOL_GPL(filter_match_preds);
621
/* Record the last parse error and where in the input it occurred. */
static void parse_error(struct filter_parse_state *ps, int err, int pos)
{
	ps->lasterr = err;
	ps->lasterr_pos = pos;
}
627
628 static void remove_filter_string(struct event_filter *filter)
629 {
630 if (!filter)
631 return;
632
633 kfree(filter->filter_string);
634 filter->filter_string = NULL;
635 }
636
637 static int replace_filter_string(struct event_filter *filter,
638 char *filter_string)
639 {
640 kfree(filter->filter_string);
641 filter->filter_string = kstrdup(filter_string, GFP_KERNEL);
642 if (!filter->filter_string)
643 return -ENOMEM;
644
645 return 0;
646 }
647
648 static int append_filter_string(struct event_filter *filter,
649 char *string)
650 {
651 int newlen;
652 char *new_filter_string;
653
654 BUG_ON(!filter->filter_string);
655 newlen = strlen(filter->filter_string) + strlen(string) + 1;
656 new_filter_string = kmalloc(newlen, GFP_KERNEL);
657 if (!new_filter_string)
658 return -ENOMEM;
659
660 strcpy(new_filter_string, filter->filter_string);
661 strcat(new_filter_string, string);
662 kfree(filter->filter_string);
663 filter->filter_string = new_filter_string;
664
665 return 0;
666 }
667
/*
 * Append a human-readable parse-error report -- a '^' caret under the
 * offending position plus the error text -- to the filter string, so
 * it shows up when the "filter" file is read back.
 */
static void append_filter_err(struct filter_parse_state *ps,
			      struct event_filter *filter)
{
	int pos = ps->lasterr_pos;
	char *buf, *pbuf;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return;

	append_filter_string(filter, "\n");
	/* Space padding carries the caret out to the error position. */
	memset(buf, ' ', PAGE_SIZE);
	/* Keep room for the message text; drop the caret for huge inputs. */
	if (pos > PAGE_SIZE - 128)
		pos = 0;
	buf[pos] = '^';
	pbuf = &buf[pos] + 1;

	/* sprintf() NUL-terminates buf right after the message. */
	sprintf(pbuf, "\nparse_error: %s\n", err_text[ps->lasterr]);
	/*
	 * NOTE(review): the -ENOMEM returns of append_filter_string() are
	 * ignored here -- the error report is best effort only.
	 */
	append_filter_string(filter, buf);
	free_page((unsigned long) buf);
}
689
690 static inline struct event_filter *event_filter(struct trace_event_file *file)
691 {
692 if (file->event_call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
693 return file->event_call->filter;
694 else
695 return file->filter;
696 }
697
698 /* caller must hold event_mutex */
699 void print_event_filter(struct trace_event_file *file, struct trace_seq *s)
700 {
701 struct event_filter *filter = event_filter(file);
702
703 if (filter && filter->filter_string)
704 trace_seq_printf(s, "%s\n", filter->filter_string);
705 else
706 trace_seq_puts(s, "none\n");
707 }
708
709 void print_subsystem_event_filter(struct event_subsystem *system,
710 struct trace_seq *s)
711 {
712 struct event_filter *filter;
713
714 mutex_lock(&event_mutex);
715 filter = system->filter;
716 if (filter && filter->filter_string)
717 trace_seq_printf(s, "%s\n", filter->filter_string);
718 else
719 trace_seq_puts(s, DEFAULT_SYS_FILTER_MESSAGE "\n");
720 mutex_unlock(&event_mutex);
721 }
722
723 static int __alloc_pred_stack(struct pred_stack *stack, int n_preds)
724 {
725 stack->preds = kcalloc(n_preds + 1, sizeof(*stack->preds), GFP_KERNEL);
726 if (!stack->preds)
727 return -ENOMEM;
728 stack->index = n_preds;
729 return 0;
730 }
731
732 static void __free_pred_stack(struct pred_stack *stack)
733 {
734 kfree(stack->preds);
735 stack->index = 0;
736 }
737
738 static int __push_pred_stack(struct pred_stack *stack,
739 struct filter_pred *pred)
740 {
741 int index = stack->index;
742
743 if (WARN_ON(index == 0))
744 return -ENOSPC;
745
746 stack->preds[--index] = pred;
747 stack->index = index;
748 return 0;
749 }
750
751 static struct filter_pred *
752 __pop_pred_stack(struct pred_stack *stack)
753 {
754 struct filter_pred *pred;
755 int index = stack->index;
756
757 pred = stack->preds[index++];
758 if (!pred)
759 return NULL;
760
761 stack->index = index;
762 return pred;
763 }
764
/*
 * Copy @src into slot @idx of the filter's predicate array and wire it
 * into the tree: for AND/OR nodes the two topmost stack entries become
 * the children; leaves are simply pushed for a later parent to consume.
 * Also computes the FILTER_PRED_FOLD hint used later to flatten runs of
 * identical ops into a linear array.  Returns the result of pushing the
 * new node back onto @stack (or -EINVAL if children are missing).
 */
static int filter_set_pred(struct event_filter *filter,
			   int idx,
			   struct pred_stack *stack,
			   struct filter_pred *src)
{
	struct filter_pred *dest = &filter->preds[idx];
	struct filter_pred *left;
	struct filter_pred *right;

	*dest = *src;
	dest->index = idx;

	if (dest->op == OP_OR || dest->op == OP_AND) {
		/* Postfix order guarantees both children are on the stack. */
		right = __pop_pred_stack(stack);
		left = __pop_pred_stack(stack);
		if (!left || !right)
			return -EINVAL;
		/*
		 * If both children can be folded
		 * and they are the same op as this op or a leaf,
		 * then this op can be folded.
		 */
		if (left->index & FILTER_PRED_FOLD &&
		    ((left->op == dest->op && !left->not) ||
		     left->left == FILTER_PRED_INVALID) &&
		    right->index & FILTER_PRED_FOLD &&
		    ((right->op == dest->op && !right->not) ||
		     right->left == FILTER_PRED_INVALID))
			dest->index |= FILTER_PRED_FOLD;

		/* Strip the FOLD hint from the stored indices. */
		dest->left = left->index & ~FILTER_PRED_FOLD;
		dest->right = right->index & ~FILTER_PRED_FOLD;
		left->parent = dest->index & ~FILTER_PRED_FOLD;
		/* The IS_RIGHT bit tells get_pred_parent() which side. */
		right->parent = dest->index | FILTER_PRED_IS_RIGHT;
	} else {
		/*
		 * Make dest->left invalid to be used as a quick
		 * way to know this is a leaf node.
		 */
		dest->left = FILTER_PRED_INVALID;

		/* All leafs allow folding the parent ops. */
		dest->index |= FILTER_PRED_FOLD;
	}

	return __push_pred_stack(stack, dest);
}
812
813 static void __free_preds(struct event_filter *filter)
814 {
815 int i;
816
817 if (filter->preds) {
818 for (i = 0; i < filter->n_preds; i++)
819 kfree(filter->preds[i].ops);
820 kfree(filter->preds);
821 filter->preds = NULL;
822 }
823 filter->a_preds = 0;
824 filter->n_preds = 0;
825 }
826
827 static void filter_disable(struct trace_event_file *file)
828 {
829 struct trace_event_call *call = file->event_call;
830
831 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
832 call->flags &= ~TRACE_EVENT_FL_FILTERED;
833 else
834 file->flags &= ~EVENT_FILE_FL_FILTERED;
835 }
836
837 static void __free_filter(struct event_filter *filter)
838 {
839 if (!filter)
840 return;
841
842 __free_preds(filter);
843 kfree(filter->filter_string);
844 kfree(filter);
845 }
846
847 void free_event_filter(struct event_filter *filter)
848 {
849 __free_filter(filter);
850 }
851
852 static struct event_filter *__alloc_filter(void)
853 {
854 struct event_filter *filter;
855
856 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
857 return filter;
858 }
859
860 static int __alloc_preds(struct event_filter *filter, int n_preds)
861 {
862 struct filter_pred *pred;
863 int i;
864
865 if (filter->preds)
866 __free_preds(filter);
867
868 filter->preds = kcalloc(n_preds, sizeof(*filter->preds), GFP_KERNEL);
869
870 if (!filter->preds)
871 return -ENOMEM;
872
873 filter->a_preds = n_preds;
874 filter->n_preds = 0;
875
876 for (i = 0; i < n_preds; i++) {
877 pred = &filter->preds[i];
878 pred->fn = filter_pred_none;
879 }
880
881 return 0;
882 }
883
884 static inline void __remove_filter(struct trace_event_file *file)
885 {
886 struct trace_event_call *call = file->event_call;
887
888 filter_disable(file);
889 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
890 remove_filter_string(call->filter);
891 else
892 remove_filter_string(file->filter);
893 }
894
895 static void filter_free_subsystem_preds(struct trace_subsystem_dir *dir,
896 struct trace_array *tr)
897 {
898 struct trace_event_file *file;
899
900 list_for_each_entry(file, &tr->events, list) {
901 if (file->system != dir)
902 continue;
903 __remove_filter(file);
904 }
905 }
906
907 static inline void __free_subsystem_filter(struct trace_event_file *file)
908 {
909 struct trace_event_call *call = file->event_call;
910
911 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) {
912 __free_filter(call->filter);
913 call->filter = NULL;
914 } else {
915 __free_filter(file->filter);
916 file->filter = NULL;
917 }
918 }
919
920 static void filter_free_subsystem_filters(struct trace_subsystem_dir *dir,
921 struct trace_array *tr)
922 {
923 struct trace_event_file *file;
924
925 list_for_each_entry(file, &tr->events, list) {
926 if (file->system != dir)
927 continue;
928 __free_subsystem_filter(file);
929 }
930 }
931
932 static int filter_add_pred(struct filter_parse_state *ps,
933 struct event_filter *filter,
934 struct filter_pred *pred,
935 struct pred_stack *stack)
936 {
937 int err;
938
939 if (WARN_ON(filter->n_preds == filter->a_preds)) {
940 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
941 return -ENOSPC;
942 }
943
944 err = filter_set_pred(filter, filter->n_preds, stack, pred);
945 if (err)
946 return err;
947
948 filter->n_preds++;
949
950 return 0;
951 }
952
953 int filter_assign_type(const char *type)
954 {
955 if (strstr(type, "__data_loc") && strstr(type, "char"))
956 return FILTER_DYN_STRING;
957
958 if (strchr(type, '[') && strstr(type, "char"))
959 return FILTER_STATIC_STRING;
960
961 return FILTER_OTHER;
962 }
963
964 static bool is_function_field(struct ftrace_event_field *field)
965 {
966 return field->filter_type == FILTER_TRACE_FN;
967 }
968
969 static bool is_string_field(struct ftrace_event_field *field)
970 {
971 return field->filter_type == FILTER_DYN_STRING ||
972 field->filter_type == FILTER_STATIC_STRING ||
973 field->filter_type == FILTER_PTR_STRING;
974 }
975
976 static bool is_legal_op(struct ftrace_event_field *field, int op)
977 {
978 if (is_string_field(field) &&
979 (op != OP_EQ && op != OP_NE && op != OP_GLOB))
980 return false;
981 if (!is_string_field(field) && op == OP_GLOB)
982 return false;
983
984 return true;
985 }
986
987 static filter_pred_fn_t select_comparison_fn(int op, int field_size,
988 int field_is_signed)
989 {
990 filter_pred_fn_t fn = NULL;
991
992 switch (field_size) {
993 case 8:
994 if (op == OP_EQ || op == OP_NE)
995 fn = filter_pred_64;
996 else if (field_is_signed)
997 fn = filter_pred_s64;
998 else
999 fn = filter_pred_u64;
1000 break;
1001 case 4:
1002 if (op == OP_EQ || op == OP_NE)
1003 fn = filter_pred_32;
1004 else if (field_is_signed)
1005 fn = filter_pred_s32;
1006 else
1007 fn = filter_pred_u32;
1008 break;
1009 case 2:
1010 if (op == OP_EQ || op == OP_NE)
1011 fn = filter_pred_16;
1012 else if (field_is_signed)
1013 fn = filter_pred_s16;
1014 else
1015 fn = filter_pred_u16;
1016 break;
1017 case 1:
1018 if (op == OP_EQ || op == OP_NE)
1019 fn = filter_pred_8;
1020 else if (field_is_signed)
1021 fn = filter_pred_s8;
1022 else
1023 fn = filter_pred_u8;
1024 break;
1025 }
1026
1027 return fn;
1028 }
1029
/*
 * Finish setting up @pred for @field: validate the operator, select the
 * match function, and for integer fields parse the value literal.
 * Records a parse error and returns -EINVAL on any problem.
 */
static int init_pred(struct filter_parse_state *ps,
		     struct ftrace_event_field *field,
		     struct filter_pred *pred)

{
	filter_pred_fn_t fn = filter_pred_none;
	unsigned long long val;
	int ret;

	pred->offset = field->offset;

	if (!is_legal_op(field, pred->op)) {
		parse_error(ps, FILT_ERR_ILLEGAL_FIELD_OP, 0);
		return -EINVAL;
	}

	if (is_string_field(field)) {
		filter_build_regex(pred);

		if (!strcmp(field->name, "comm")) {
			/* "comm" matches current->comm, not the record. */
			fn = filter_pred_comm;
			pred->regex.field_len = TASK_COMM_LEN;
		} else if (field->filter_type == FILTER_STATIC_STRING) {
			fn = filter_pred_string;
			pred->regex.field_len = field->size;
		} else if (field->filter_type == FILTER_DYN_STRING)
			fn = filter_pred_strloc;
		else
			fn = filter_pred_pchar;
	} else if (is_function_field(field)) {
		/* Function tracing only supports filtering on "ip". */
		if (strcmp(field->name, "ip")) {
			parse_error(ps, FILT_ERR_IP_FIELD_ONLY, 0);
			return -EINVAL;
		}
	} else {
		/* Integer field: parse the literal with proper signedness. */
		if (field->is_signed)
			ret = kstrtoll(pred->regex.pattern, 0, &val);
		else
			ret = kstrtoull(pred->regex.pattern, 0, &val);
		if (ret) {
			parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
			return -EINVAL;
		}
		pred->val = val;

		if (!strcmp(field->name, "cpu"))
			fn = filter_pred_cpu;
		else
			fn = select_comparison_fn(pred->op, field->size,
						  field->is_signed);
		if (!fn) {
			parse_error(ps, FILT_ERR_INVALID_OP, 0);
			return -EINVAL;
		}
	}

	/* '!=' is implemented as '==' with the result inverted. */
	if (pred->op == OP_NE)
		pred->not ^= 1;

	pred->fn = fn;
	return 0;
}
1092
1093 static void parse_init(struct filter_parse_state *ps,
1094 struct filter_op *ops,
1095 char *infix_string)
1096 {
1097 memset(ps, '\0', sizeof(*ps));
1098
1099 ps->infix.string = infix_string;
1100 ps->infix.cnt = strlen(infix_string);
1101 ps->ops = ops;
1102
1103 INIT_LIST_HEAD(&ps->opstack);
1104 INIT_LIST_HEAD(&ps->postfix);
1105 }
1106
1107 static char infix_next(struct filter_parse_state *ps)
1108 {
1109 if (!ps->infix.cnt)
1110 return 0;
1111
1112 ps->infix.cnt--;
1113
1114 return ps->infix.string[ps->infix.tail++];
1115 }
1116
1117 static char infix_peek(struct filter_parse_state *ps)
1118 {
1119 if (ps->infix.tail == strlen(ps->infix.string))
1120 return 0;
1121
1122 return ps->infix.string[ps->infix.tail];
1123 }
1124
1125 static void infix_advance(struct filter_parse_state *ps)
1126 {
1127 if (!ps->infix.cnt)
1128 return;
1129
1130 ps->infix.cnt--;
1131 ps->infix.tail++;
1132 }
1133
1134 static inline int is_precedence_lower(struct filter_parse_state *ps,
1135 int a, int b)
1136 {
1137 return ps->ops[a].precedence < ps->ops[b].precedence;
1138 }
1139
1140 static inline int is_op_char(struct filter_parse_state *ps, char c)
1141 {
1142 int i;
1143
1144 for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1145 if (ps->ops[i].string[0] == c)
1146 return 1;
1147 }
1148
1149 return 0;
1150 }
1151
1152 static int infix_get_op(struct filter_parse_state *ps, char firstc)
1153 {
1154 char nextc = infix_peek(ps);
1155 char opstr[3];
1156 int i;
1157
1158 opstr[0] = firstc;
1159 opstr[1] = nextc;
1160 opstr[2] = '\0';
1161
1162 for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1163 if (!strcmp(opstr, ps->ops[i].string)) {
1164 infix_advance(ps);
1165 return ps->ops[i].id;
1166 }
1167 }
1168
1169 opstr[1] = '\0';
1170
1171 for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1172 if (!strcmp(opstr, ps->ops[i].string))
1173 return ps->ops[i].id;
1174 }
1175
1176 return OP_NONE;
1177 }
1178
1179 static inline void clear_operand_string(struct filter_parse_state *ps)
1180 {
1181 memset(ps->operand.string, '\0', MAX_FILTER_STR_VAL);
1182 ps->operand.tail = 0;
1183 }
1184
1185 static inline int append_operand_char(struct filter_parse_state *ps, char c)
1186 {
1187 if (ps->operand.tail == MAX_FILTER_STR_VAL - 1)
1188 return -EINVAL;
1189
1190 ps->operand.string[ps->operand.tail++] = c;
1191
1192 return 0;
1193 }
1194
1195 static int filter_opstack_push(struct filter_parse_state *ps, int op)
1196 {
1197 struct opstack_op *opstack_op;
1198
1199 opstack_op = kmalloc(sizeof(*opstack_op), GFP_KERNEL);
1200 if (!opstack_op)
1201 return -ENOMEM;
1202
1203 opstack_op->op = op;
1204 list_add(&opstack_op->list, &ps->opstack);
1205
1206 return 0;
1207 }
1208
1209 static int filter_opstack_empty(struct filter_parse_state *ps)
1210 {
1211 return list_empty(&ps->opstack);
1212 }
1213
1214 static int filter_opstack_top(struct filter_parse_state *ps)
1215 {
1216 struct opstack_op *opstack_op;
1217
1218 if (filter_opstack_empty(ps))
1219 return OP_NONE;
1220
1221 opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
1222
1223 return opstack_op->op;
1224 }
1225
1226 static int filter_opstack_pop(struct filter_parse_state *ps)
1227 {
1228 struct opstack_op *opstack_op;
1229 int op;
1230
1231 if (filter_opstack_empty(ps))
1232 return OP_NONE;
1233
1234 opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
1235 op = opstack_op->op;
1236 list_del(&opstack_op->list);
1237
1238 kfree(opstack_op);
1239
1240 return op;
1241 }
1242
1243 static void filter_opstack_clear(struct filter_parse_state *ps)
1244 {
1245 while (!filter_opstack_empty(ps))
1246 filter_opstack_pop(ps);
1247 }
1248
1249 static char *curr_operand(struct filter_parse_state *ps)
1250 {
1251 return ps->operand.string;
1252 }
1253
1254 static int postfix_append_operand(struct filter_parse_state *ps, char *operand)
1255 {
1256 struct postfix_elt *elt;
1257
1258 elt = kmalloc(sizeof(*elt), GFP_KERNEL);
1259 if (!elt)
1260 return -ENOMEM;
1261
1262 elt->op = OP_NONE;
1263 elt->operand = kstrdup(operand, GFP_KERNEL);
1264 if (!elt->operand) {
1265 kfree(elt);
1266 return -ENOMEM;
1267 }
1268
1269 list_add_tail(&elt->list, &ps->postfix);
1270
1271 return 0;
1272 }
1273
1274 static int postfix_append_op(struct filter_parse_state *ps, int op)
1275 {
1276 struct postfix_elt *elt;
1277
1278 elt = kmalloc(sizeof(*elt), GFP_KERNEL);
1279 if (!elt)
1280 return -ENOMEM;
1281
1282 elt->op = op;
1283 elt->operand = NULL;
1284
1285 list_add_tail(&elt->list, &ps->postfix);
1286
1287 return 0;
1288 }
1289
1290 static void postfix_clear(struct filter_parse_state *ps)
1291 {
1292 struct postfix_elt *elt;
1293
1294 while (!list_empty(&ps->postfix)) {
1295 elt = list_first_entry(&ps->postfix, struct postfix_elt, list);
1296 list_del(&elt->list);
1297 kfree(elt->operand);
1298 kfree(elt);
1299 }
1300 }
1301
/*
 * Convert the infix filter string in @ps into a postfix (RPN) list of
 * operators and operands using the shunting-yard algorithm.  Double
 * quotes group an operand verbatim (operator characters inside are
 * treated as literals).  On error the cause is recorded via
 * parse_error() and -EINVAL is returned.
 *
 * NOTE(review): the -ENOMEM returns of postfix_append_*() and
 * filter_opstack_push() are ignored here, so allocation failure yields
 * a truncated expression rather than an error -- verify intent.
 */
static int filter_parse(struct filter_parse_state *ps)
{
	int in_string = 0;
	int op, top_op;
	char ch;

	while ((ch = infix_next(ps))) {
		/* Toggle quoted mode; the quote characters are dropped. */
		if (ch == '"') {
			in_string ^= 1;
			continue;
		}

		if (in_string)
			goto parse_operand;

		if (isspace(ch))
			continue;

		if (is_op_char(ps, ch)) {
			op = infix_get_op(ps, ch);
			if (op == OP_NONE) {
				parse_error(ps, FILT_ERR_INVALID_OP, 0);
				return -EINVAL;
			}

			/* Flush any operand accumulated so far. */
			if (strlen(curr_operand(ps))) {
				postfix_append_operand(ps, curr_operand(ps));
				clear_operand_string(ps);
			}

			/* Pop operators of higher-or-equal precedence. */
			while (!filter_opstack_empty(ps)) {
				top_op = filter_opstack_top(ps);
				if (!is_precedence_lower(ps, top_op, op)) {
					top_op = filter_opstack_pop(ps);
					postfix_append_op(ps, top_op);
					continue;
				}
				break;
			}

			filter_opstack_push(ps, op);
			continue;
		}

		if (ch == '(') {
			filter_opstack_push(ps, OP_OPEN_PAREN);
			continue;
		}

		if (ch == ')') {
			if (strlen(curr_operand(ps))) {
				postfix_append_operand(ps, curr_operand(ps));
				clear_operand_string(ps);
			}

			/* Flush operators back to the matching '('. */
			top_op = filter_opstack_pop(ps);
			while (top_op != OP_NONE) {
				if (top_op == OP_OPEN_PAREN)
					break;
				postfix_append_op(ps, top_op);
				top_op = filter_opstack_pop(ps);
			}
			/* Ran off the stack without finding a '('. */
			if (top_op == OP_NONE) {
				parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
				return -EINVAL;
			}
			continue;
		}
parse_operand:
		if (append_operand_char(ps, ch)) {
			parse_error(ps, FILT_ERR_OPERAND_TOO_LONG, 0);
			return -EINVAL;
		}
	}

	/* Flush the trailing operand and any remaining operators. */
	if (strlen(curr_operand(ps)))
		postfix_append_operand(ps, curr_operand(ps));

	while (!filter_opstack_empty(ps)) {
		top_op = filter_opstack_pop(ps);
		if (top_op == OP_NONE)
			break;
		/* A leftover '(' means its ')' never arrived. */
		if (top_op == OP_OPEN_PAREN) {
			parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
			return -EINVAL;
		}
		postfix_append_op(ps, top_op);
	}

	return 0;
}
1393
1394 static struct filter_pred *create_pred(struct filter_parse_state *ps,
1395 struct trace_event_call *call,
1396 int op, char *operand1, char *operand2)
1397 {
1398 struct ftrace_event_field *field;
1399 static struct filter_pred pred;
1400
1401 memset(&pred, 0, sizeof(pred));
1402 pred.op = op;
1403
1404 if (op == OP_AND || op == OP_OR)
1405 return &pred;
1406
1407 if (!operand1 || !operand2) {
1408 parse_error(ps, FILT_ERR_MISSING_FIELD, 0);
1409 return NULL;
1410 }
1411
1412 field = trace_find_event_field(call, operand1);
1413 if (!field) {
1414 parse_error(ps, FILT_ERR_FIELD_NOT_FOUND, 0);
1415 return NULL;
1416 }
1417
1418 strcpy(pred.regex.pattern, operand2);
1419 pred.regex.len = strlen(pred.regex.pattern);
1420 pred.field = field;
1421 return init_pred(ps, field, &pred) ? NULL : &pred;
1422 }
1423
/*
 * check_preds - sanity check the postfix expression before tree building
 *
 * Simulates evaluation-stack depth over the postfix list: each operand
 * (OP_NONE element) pushes one value (cnt++); each binary operator
 * consumes two values and produces one (net cnt--); OP_NOT is unary and
 * leaves the depth unchanged.  A well-formed expression ends with
 * exactly one value (cnt == 1).
 *
 * Also requires at least one comparison predicate, and that logical
 * operators (&&/||) do not outnumber the comparisons they join.
 *
 * Returns 0 if well formed, -EINVAL otherwise (parse error recorded).
 */
static int check_preds(struct filter_parse_state *ps)
{
	int n_normal_preds = 0, n_logical_preds = 0;
	struct postfix_elt *elt;
	int cnt = 0;

	list_for_each_entry(elt, &ps->postfix, list) {
		if (elt->op == OP_NONE) {
			/* operand: pushes one value */
			cnt++;
			continue;
		}

		if (elt->op == OP_AND || elt->op == OP_OR) {
			/* binary logical op: two in, one out */
			n_logical_preds++;
			cnt--;
			continue;
		}
		if (elt->op != OP_NOT)
			cnt--;
		n_normal_preds++;
		/* all ops should have operands */
		if (cnt < 0)
			break;
	}

	if (cnt != 1 || !n_normal_preds || n_logical_preds >= n_normal_preds) {
		parse_error(ps, FILT_ERR_INVALID_FILTER, 0);
		return -EINVAL;
	}

	return 0;
}
1456
1457 static int count_preds(struct filter_parse_state *ps)
1458 {
1459 struct postfix_elt *elt;
1460 int n_preds = 0;
1461
1462 list_for_each_entry(elt, &ps->postfix, list) {
1463 if (elt->op == OP_NONE)
1464 continue;
1465 n_preds++;
1466 }
1467
1468 return n_preds;
1469 }
1470
/* Bookkeeping for check_pred_tree_cb(): abort once count exceeds max. */
struct check_pred_data {
	int count;	/* nodes visited so far during the walk */
	int max;	/* upper bound (3 * n_preds); more implies a cycle */
};
1475
1476 static int check_pred_tree_cb(enum move_type move, struct filter_pred *pred,
1477 int *err, void *data)
1478 {
1479 struct check_pred_data *d = data;
1480
1481 if (WARN_ON(d->count++ > d->max)) {
1482 *err = -EINVAL;
1483 return WALK_PRED_ABORT;
1484 }
1485 return WALK_PRED_DEFAULT;
1486 }
1487
/*
 * The tree is walked at filtering of an event. If the tree is not correctly
 * built, it may cause an infinite loop. Check here that the tree does
 * indeed terminate.
 */
static int check_pred_tree(struct event_filter *filter,
			   struct filter_pred *root)
{
	struct check_pred_data data = {
		/*
		 * The max that we can hit a node is three times.
		 * Once going down, once coming up from left, and
		 * once coming up from right. This is more than enough
		 * since leafs are only hit a single time.
		 */
		.max = 3 * filter->n_preds,
		.count = 0,
	};

	/* -EINVAL comes back through the callback if the bound is hit */
	return walk_pred_tree(filter->preds, root,
			      check_pred_tree_cb, &data);
}
1510
1511 static int count_leafs_cb(enum move_type move, struct filter_pred *pred,
1512 int *err, void *data)
1513 {
1514 int *count = data;
1515
1516 if ((move == MOVE_DOWN) &&
1517 (pred->left == FILTER_PRED_INVALID))
1518 (*count)++;
1519
1520 return WALK_PRED_DEFAULT;
1521 }
1522
1523 static int count_leafs(struct filter_pred *preds, struct filter_pred *root)
1524 {
1525 int count = 0, ret;
1526
1527 ret = walk_pred_tree(preds, root, count_leafs_cb, &count);
1528 WARN_ON(ret);
1529 return count;
1530 }
1531
/* State for folding a subtree's leaf indices into root->ops[]. */
struct fold_pred_data {
	struct filter_pred *root;	/* node whose ops[] is being filled */
	int count;	/* leafs recorded so far */
	int children;	/* total leafs expected (sizes root->ops) */
};
1537
/*
 * fold_pred_cb - record one leaf's index into the fold array of root
 *
 * For every leaf hit on the way down, clear its FILTER_PRED_FOLD marker
 * and append its index to root->ops.  Aborts with -EINVAL if more leafs
 * show up than were counted when root->ops was sized.
 */
static int fold_pred_cb(enum move_type move, struct filter_pred *pred,
			int *err, void *data)
{
	struct fold_pred_data *d = data;
	struct filter_pred *root = d->root;

	/* only leafs, and only on the way down */
	if (move != MOVE_DOWN)
		return WALK_PRED_DEFAULT;
	if (pred->left != FILTER_PRED_INVALID)
		return WALK_PRED_DEFAULT;

	if (WARN_ON(d->count == d->children)) {
		*err = -EINVAL;
		return WALK_PRED_ABORT;
	}

	pred->index &= ~FILTER_PRED_FOLD;
	root->ops[d->count++] = pred->index;
	return WALK_PRED_DEFAULT;
}
1558
/*
 * fold_pred - flatten the leafs below @root into root->ops[]
 *
 * Allocates root->ops sized by the leaf count of both subtrees and
 * fills it via fold_pred_cb(), so the match code can process the
 * grouped leafs as a flat array instead of walking the tree.
 *
 * Returns 0 on success or a negative error code.
 */
static int fold_pred(struct filter_pred *preds, struct filter_pred *root)
{
	struct fold_pred_data data = {
		.root = root,
		.count = 0,
	};
	int children;

	/* No need to keep the fold flag */
	root->index &= ~FILTER_PRED_FOLD;

	/* If the root is a leaf then do nothing */
	if (root->left == FILTER_PRED_INVALID)
		return 0;

	/* count the children */
	children = count_leafs(preds, &preds[root->left]);
	children += count_leafs(preds, &preds[root->right]);

	root->ops = kcalloc(children, sizeof(*root->ops), GFP_KERNEL);
	if (!root->ops)
		return -ENOMEM;

	/* val holds the number of folded entries in ops[] */
	root->val = children;
	data.children = children;
	return walk_pred_tree(preds, root, fold_pred_cb, &data);
}
1586
/*
 * fold_pred_tree_cb - fold each subtree whose root carries the
 * FILTER_PRED_FOLD marker, then skip over it (continue with parent).
 */
static int fold_pred_tree_cb(enum move_type move, struct filter_pred *pred,
			     int *err, void *data)
{
	struct filter_pred *preds = data;

	if (move != MOVE_DOWN)
		return WALK_PRED_DEFAULT;
	if (!(pred->index & FILTER_PRED_FOLD))
		return WALK_PRED_DEFAULT;

	*err = fold_pred(preds, pred);
	if (*err)
		return WALK_PRED_ABORT;

	/* everything below is folded, continue with parent */
	return WALK_PRED_PARENT;
}
1604
/*
 * To optimize the processing of the ops, if we have several "ors" or
 * "ands" together, we can put them in an array and process them all
 * together speeding up the filter logic.
 */
static int fold_pred_tree(struct event_filter *filter,
			  struct filter_pred *root)
{
	/* fold_pred_tree_cb receives the preds array as its data arg */
	return walk_pred_tree(filter->preds, root, fold_pred_tree_cb,
			      filter->preds);
}
1616
1617 static int replace_preds(struct trace_event_call *call,
1618 struct event_filter *filter,
1619 struct filter_parse_state *ps,
1620 bool dry_run)
1621 {
1622 char *operand1 = NULL, *operand2 = NULL;
1623 struct filter_pred *pred;
1624 struct filter_pred *root;
1625 struct postfix_elt *elt;
1626 struct pred_stack stack = { }; /* init to NULL */
1627 int err;
1628 int n_preds = 0;
1629
1630 n_preds = count_preds(ps);
1631 if (n_preds >= MAX_FILTER_PRED) {
1632 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1633 return -ENOSPC;
1634 }
1635
1636 err = check_preds(ps);
1637 if (err)
1638 return err;
1639
1640 if (!dry_run) {
1641 err = __alloc_pred_stack(&stack, n_preds);
1642 if (err)
1643 return err;
1644 err = __alloc_preds(filter, n_preds);
1645 if (err)
1646 goto fail;
1647 }
1648
1649 n_preds = 0;
1650 list_for_each_entry(elt, &ps->postfix, list) {
1651 if (elt->op == OP_NONE) {
1652 if (!operand1)
1653 operand1 = elt->operand;
1654 else if (!operand2)
1655 operand2 = elt->operand;
1656 else {
1657 parse_error(ps, FILT_ERR_TOO_MANY_OPERANDS, 0);
1658 err = -EINVAL;
1659 goto fail;
1660 }
1661 continue;
1662 }
1663
1664 if (elt->op == OP_NOT) {
1665 if (!n_preds || operand1 || operand2) {
1666 parse_error(ps, FILT_ERR_ILLEGAL_NOT_OP, 0);
1667 err = -EINVAL;
1668 goto fail;
1669 }
1670 if (!dry_run)
1671 filter->preds[n_preds - 1].not ^= 1;
1672 continue;
1673 }
1674
1675 if (WARN_ON(n_preds++ == MAX_FILTER_PRED)) {
1676 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1677 err = -ENOSPC;
1678 goto fail;
1679 }
1680
1681 pred = create_pred(ps, call, elt->op, operand1, operand2);
1682 if (!pred) {
1683 err = -EINVAL;
1684 goto fail;
1685 }
1686
1687 if (!dry_run) {
1688 err = filter_add_pred(ps, filter, pred, &stack);
1689 if (err)
1690 goto fail;
1691 }
1692
1693 operand1 = operand2 = NULL;
1694 }
1695
1696 if (!dry_run) {
1697 /* We should have one item left on the stack */
1698 pred = __pop_pred_stack(&stack);
1699 if (!pred)
1700 return -EINVAL;
1701 /* This item is where we start from in matching */
1702 root = pred;
1703 /* Make sure the stack is empty */
1704 pred = __pop_pred_stack(&stack);
1705 if (WARN_ON(pred)) {
1706 err = -EINVAL;
1707 filter->root = NULL;
1708 goto fail;
1709 }
1710 err = check_pred_tree(filter, root);
1711 if (err)
1712 goto fail;
1713
1714 /* Optimize the tree */
1715 err = fold_pred_tree(filter, root);
1716 if (err)
1717 goto fail;
1718
1719 /* We don't set root until we know it works */
1720 barrier();
1721 filter->root = root;
1722 }
1723
1724 err = 0;
1725 fail:
1726 __free_pred_stack(&stack);
1727 return err;
1728 }
1729
1730 static inline void event_set_filtered_flag(struct trace_event_file *file)
1731 {
1732 struct trace_event_call *call = file->event_call;
1733
1734 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
1735 call->flags |= TRACE_EVENT_FL_FILTERED;
1736 else
1737 file->flags |= EVENT_FILE_FL_FILTERED;
1738 }
1739
/*
 * Publish @filter for @file (or for its call, with legacy call-level
 * filtering) via rcu_assign_pointer() so concurrent readers observe a
 * fully-initialized filter.
 */
static inline void event_set_filter(struct trace_event_file *file,
				    struct event_filter *filter)
{
	struct trace_event_call *call = file->event_call;

	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
		rcu_assign_pointer(call->filter, filter);
	else
		rcu_assign_pointer(file->filter, filter);
}
1750
1751 static inline void event_clear_filter(struct trace_event_file *file)
1752 {
1753 struct trace_event_call *call = file->event_call;
1754
1755 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
1756 RCU_INIT_POINTER(call->filter, NULL);
1757 else
1758 RCU_INIT_POINTER(file->filter, NULL);
1759 }
1760
1761 static inline void
1762 event_set_no_set_filter_flag(struct trace_event_file *file)
1763 {
1764 struct trace_event_call *call = file->event_call;
1765
1766 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
1767 call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
1768 else
1769 file->flags |= EVENT_FILE_FL_NO_SET_FILTER;
1770 }
1771
1772 static inline void
1773 event_clear_no_set_filter_flag(struct trace_event_file *file)
1774 {
1775 struct trace_event_call *call = file->event_call;
1776
1777 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
1778 call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
1779 else
1780 file->flags &= ~EVENT_FILE_FL_NO_SET_FILTER;
1781 }
1782
1783 static inline bool
1784 event_no_set_filter_flag(struct trace_event_file *file)
1785 {
1786 struct trace_event_call *call = file->event_call;
1787
1788 if (file->flags & EVENT_FILE_FL_NO_SET_FILTER)
1789 return true;
1790
1791 if ((call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) &&
1792 (call->flags & TRACE_EVENT_FL_NO_SET_FILTER))
1793 return true;
1794
1795 return false;
1796 }
1797
/* Node used to collect per-event filters while applying a system filter. */
struct filter_list {
	struct list_head list;
	struct event_filter *filter;
};
1802
/*
 * replace_system_preds - apply one filter string to all events of a subsystem
 * @dir:	the subsystem directory whose events are affected
 * @tr:		trace array the subsystem belongs to
 * @ps:		parse state holding the already-parsed filter
 * @filter_string: original string, recorded in each new filter
 *
 * Two passes: first a dry run marks which events cannot take the filter
 * (e.g. missing fields); then real filters are built and swapped in for
 * the remaining events.  Old filters are freed only after a
 * synchronize_sched() so in-flight readers finish first.
 *
 * Returns 0 if at least one event took the filter, -EINVAL if none did,
 * -ENOMEM on allocation failure.
 */
static int replace_system_preds(struct trace_subsystem_dir *dir,
				struct trace_array *tr,
				struct filter_parse_state *ps,
				char *filter_string)
{
	struct trace_event_file *file;
	struct filter_list *filter_item;
	struct filter_list *tmp;
	LIST_HEAD(filter_list);
	bool fail = true;
	int err;

	/* Pass 1: dry run to see which events can accept the filter */
	list_for_each_entry(file, &tr->events, list) {
		if (file->system != dir)
			continue;

		/*
		 * Try to see if the filter can be applied
		 * (filter arg is ignored on dry_run)
		 */
		err = replace_preds(file->event_call, NULL, ps, true);
		if (err)
			event_set_no_set_filter_flag(file);
		else
			event_clear_no_set_filter_flag(file);
	}

	/* Pass 2: build and install a filter per accepting event */
	list_for_each_entry(file, &tr->events, list) {
		struct event_filter *filter;

		if (file->system != dir)
			continue;

		if (event_no_set_filter_flag(file))
			continue;

		filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
		if (!filter_item)
			goto fail_mem;

		list_add_tail(&filter_item->list, &filter_list);

		filter_item->filter = __alloc_filter();
		if (!filter_item->filter)
			goto fail_mem;
		filter = filter_item->filter;

		/* Can only fail on no memory */
		err = replace_filter_string(filter, filter_string);
		if (err)
			goto fail_mem;

		err = replace_preds(file->event_call, filter, ps, false);
		if (err) {
			filter_disable(file);
			parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
			append_filter_err(ps, filter);
		} else
			event_set_filtered_flag(file);
		/*
		 * Regardless of if this returned an error, we still
		 * replace the filter for the call.
		 */
		filter = event_filter(file);
		event_set_filter(file, filter_item->filter);
		/* the list item now owns the OLD filter, for later freeing */
		filter_item->filter = filter;

		fail = false;
	}

	if (fail)
		goto fail;

	/*
	 * The calls can still be using the old filters.
	 * Do a synchronize_sched() to ensure all calls are
	 * done with them before we free them.
	 */
	synchronize_sched();
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		__free_filter(filter_item->filter);
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	return 0;
fail:
	/* No call succeeded */
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
	return -EINVAL;
fail_mem:
	/* If any call succeeded, we still need to sync */
	if (!fail)
		synchronize_sched();
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		__free_filter(filter_item->filter);
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	return -ENOMEM;
}
1907
/*
 * create_filter_start - allocate parse state + filter and run the parser
 * @filter_str: the filter string to parse
 * @set_str: record @filter_str in the filter for detailed error output
 * @psp: out param for the parse state (must point to NULL on entry)
 * @filterp: out param for the new filter (must point to NULL on entry)
 *
 * On success (0) or parse error, *@psp and *@filterp are set and the
 * caller must finish with create_filter_finish() and eventually free
 * the filter.  On allocation failure returns -ENOMEM with both out
 * params untouched.
 */
static int create_filter_start(char *filter_str, bool set_str,
			       struct filter_parse_state **psp,
			       struct event_filter **filterp)
{
	struct event_filter *filter;
	struct filter_parse_state *ps = NULL;
	int err = 0;

	WARN_ON_ONCE(*psp || *filterp);

	/* allocate everything, and if any fails, free all and fail */
	filter = __alloc_filter();
	if (filter && set_str)
		err = replace_filter_string(filter, filter_str);

	ps = kzalloc(sizeof(*ps), GFP_KERNEL);

	if (!filter || !ps || err) {
		kfree(ps);
		__free_filter(filter);
		return -ENOMEM;
	}

	/* we're committed to creating a new filter */
	*filterp = filter;
	*psp = ps;

	parse_init(ps, filter_ops, filter_str);
	err = filter_parse(ps);
	if (err && set_str)
		append_filter_err(ps, filter);
	return err;
}
1941
/* Tear down the parse state created by create_filter_start(). */
static void create_filter_finish(struct filter_parse_state *ps)
{
	if (!ps)
		return;

	filter_opstack_clear(ps);
	postfix_clear(ps);
	kfree(ps);
}
1950
/**
 * create_filter - create a filter for a trace_event_call
 * @call: trace_event_call to create a filter for
 * @filter_str: filter string
 * @set_str: remember @filter_str and enable detailed error in filter
 * @filterp: out param for created filter (always updated on return)
 *
 * Creates a filter for @call with @filter_str. If @set_str is %true,
 * @filter_str is copied and recorded in the new filter.
 *
 * On success, returns 0 and *@filterp points to the new filter. On
 * failure, returns -errno and *@filterp may point to %NULL or to a new
 * filter. In the latter case, the returned filter contains error
 * information if @set_str is %true and the caller is responsible for
 * freeing it.
 */
static int create_filter(struct trace_event_call *call,
			 char *filter_str, bool set_str,
			 struct event_filter **filterp)
{
	struct event_filter *filter = NULL;
	struct filter_parse_state *ps = NULL;
	int err;

	err = create_filter_start(filter_str, set_str, &ps, &filter);
	if (!err) {
		/* parse succeeded: build the predicate tree into filter */
		err = replace_preds(call, filter, ps, false);
		if (err && set_str)
			append_filter_err(ps, filter);
	}
	create_filter_finish(ps);

	/* hand the filter back even on error (see kerneldoc above) */
	*filterp = filter;
	return err;
}
1986
/* Exported wrapper so other tracing code can build an event filter. */
int create_event_filter(struct trace_event_call *call,
			char *filter_str, bool set_str,
			struct event_filter **filterp)
{
	return create_filter(call, filter_str, set_str, filterp);
}
1993
/**
 * create_system_filter - create a filter for an event_subsystem
 * @dir: subsystem directory whose events get the filter
 * @tr: trace array the subsystem belongs to
 * @filter_str: filter string
 * @filterp: out param for created filter (always updated on return)
 *
 * Identical to create_filter() except that it applies the filter to
 * every event in the subsystem and always remembers @filter_str.
 */
static int create_system_filter(struct trace_subsystem_dir *dir,
				struct trace_array *tr,
				char *filter_str, struct event_filter **filterp)
{
	struct event_filter *filter = NULL;
	struct filter_parse_state *ps = NULL;
	int err;

	err = create_filter_start(filter_str, true, &ps, &filter);
	if (!err) {
		err = replace_system_preds(dir, tr, ps, filter_str);
		if (!err) {
			/* System filters just show a default message */
			kfree(filter->filter_string);
			filter->filter_string = NULL;
		} else {
			append_filter_err(ps, filter);
		}
	}
	create_filter_finish(ps);

	*filterp = filter;
	return err;
}
2027
/* caller must hold event_mutex */
int apply_event_filter(struct trace_event_file *file, char *filter_string)
{
	struct trace_event_call *call = file->event_call;
	struct event_filter *filter;
	int err;

	/* Writing "0" removes any existing filter */
	if (!strcmp(strstrip(filter_string), "0")) {
		filter_disable(file);
		filter = event_filter(file);

		if (!filter)
			return 0;

		event_clear_filter(file);

		/* Make sure the filter is not being used */
		synchronize_sched();
		__free_filter(filter);

		return 0;
	}

	err = create_filter(call, filter_string, true, &filter);

	/*
	 * Always swap the call filter with the new filter
	 * even if there was an error. If there was an error
	 * in the filter, we disable the filter and show the error
	 * string
	 */
	if (filter) {
		struct event_filter *tmp;

		tmp = event_filter(file);
		if (!err)
			event_set_filtered_flag(file);
		else
			filter_disable(file);

		event_set_filter(file, filter);

		if (tmp) {
			/* Make sure the call is done with the filter */
			synchronize_sched();
			__free_filter(tmp);
		}
	}

	return err;
}
2079
/*
 * apply_subsystem_event_filter - set or clear the filter for a whole
 * subsystem.  "0" clears all per-event filters; any other string is
 * compiled and applied to every event in the subsystem.
 */
int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
				 char *filter_string)
{
	struct event_subsystem *system = dir->subsystem;
	struct trace_array *tr = dir->tr;
	struct event_filter *filter;
	int err = 0;

	mutex_lock(&event_mutex);

	/* Make sure the system still has events */
	if (!dir->nr_events) {
		err = -ENODEV;
		goto out_unlock;
	}

	if (!strcmp(strstrip(filter_string), "0")) {
		filter_free_subsystem_preds(dir, tr);
		remove_filter_string(system->filter);
		filter = system->filter;
		system->filter = NULL;
		/* Ensure all filters are no longer used */
		synchronize_sched();
		filter_free_subsystem_filters(dir, tr);
		__free_filter(filter);
		goto out_unlock;
	}

	err = create_system_filter(dir, tr, filter_string, &filter);
	if (filter) {
		/*
		 * No event actually uses the system filter
		 * we can free it without synchronize_sched().
		 */
		__free_filter(system->filter);
		system->filter = filter;
	}
out_unlock:
	mutex_unlock(&event_mutex);

	return err;
}
2122
2123 #ifdef CONFIG_PERF_EVENTS
2124
/* Detach and free the event filter attached to a perf event. */
void ftrace_profile_free_filter(struct perf_event *event)
{
	struct event_filter *filter = event->filter;

	/* clear the pointer before freeing its target */
	event->filter = NULL;
	__free_filter(filter);
}
2132
/*
 * State carried while translating an event filter into ftrace
 * filter/notrace calls for a perf function-trace event.
 */
struct function_filter_data {
	struct ftrace_ops *ops;
	int first_filter;	/* first ftrace_set_filter() call resets the list */
	int first_notrace;	/* first ftrace_set_notrace() call resets the list */
};
2138
2139 #ifdef CONFIG_FUNCTION_TRACER
2140 static char **
2141 ftrace_function_filter_re(char *buf, int len, int *count)
2142 {
2143 char *str, **re;
2144
2145 str = kstrndup(buf, len, GFP_KERNEL);
2146 if (!str)
2147 return NULL;
2148
2149 /*
2150 * The argv_split function takes white space
2151 * as a separator, so convert ',' into spaces.
2152 */
2153 strreplace(str, ',', ' ');
2154
2155 re = argv_split(GFP_KERNEL, str, count);
2156 kfree(str);
2157 return re;
2158 }
2159
/* Route one regexp to the filter or notrace list of @ops. */
static int ftrace_function_set_regexp(struct ftrace_ops *ops, int filter,
				      int reset, char *re, int len)
{
	return filter ? ftrace_set_filter(ops, re, len, reset)
		      : ftrace_set_notrace(ops, re, len, reset);
}
2172
/*
 * Apply one predicate value to the perf event's ftrace_ops, as either
 * a filter (==) or notrace (!=) entry.  Returns 0 on success or a
 * negative error code.
 */
static int __ftrace_function_set_filter(int filter, char *buf, int len,
					struct function_filter_data *data)
{
	int i, re_cnt, ret = -EINVAL;
	int *reset;
	char **re;

	/* reset the list only on the first call of each kind */
	reset = filter ? &data->first_filter : &data->first_notrace;

	/*
	 * The 'ip' field could have multiple filters set, separated
	 * either by space or comma. We first cut the filter and apply
	 * all pieces separately.
	 */
	re = ftrace_function_filter_re(buf, len, &re_cnt);
	if (!re)
		return -EINVAL;

	for (i = 0; i < re_cnt; i++) {
		ret = ftrace_function_set_regexp(data->ops, filter, *reset,
						 re[i], strlen(re[i]));
		if (ret)
			break;

		if (*reset)
			*reset = 0;
	}

	argv_free(re);
	return ret;
}
2204
2205 static int ftrace_function_check_pred(struct filter_pred *pred, int leaf)
2206 {
2207 struct ftrace_event_field *field = pred->field;
2208
2209 if (leaf) {
2210 /*
2211 * Check the leaf predicate for function trace, verify:
2212 * - only '==' and '!=' is used
2213 * - the 'ip' field is used
2214 */
2215 if ((pred->op != OP_EQ) && (pred->op != OP_NE))
2216 return -EINVAL;
2217
2218 if (strcmp(field->name, "ip"))
2219 return -EINVAL;
2220 } else {
2221 /*
2222 * Check the non leaf predicate for function trace, verify:
2223 * - only '||' is used
2224 */
2225 if (pred->op != OP_OR)
2226 return -EINVAL;
2227 }
2228
2229 return 0;
2230 }
2231
/*
 * Walk callback: validate each node and, for every valid leaf, program
 * its pattern into the perf event's ftrace_ops ('==' -> filter list,
 * '!=' -> notrace list).
 */
static int ftrace_function_set_filter_cb(enum move_type move,
					 struct filter_pred *pred,
					 int *err, void *data)
{
	/* Checking the node is valid for function trace. */
	if ((move != MOVE_DOWN) ||
	    (pred->left != FILTER_PRED_INVALID)) {
		*err = ftrace_function_check_pred(pred, 0);
	} else {
		*err = ftrace_function_check_pred(pred, 1);
		if (*err)
			return WALK_PRED_ABORT;

		*err = __ftrace_function_set_filter(pred->op == OP_EQ,
						    pred->regex.pattern,
						    pred->regex.len,
						    data);
	}

	return (*err) ? WALK_PRED_ABORT : WALK_PRED_DEFAULT;
}
2253
/*
 * Translate the compiled event filter into ftrace filter/notrace lists
 * on the perf event's private ftrace_ops.
 */
static int ftrace_function_set_filter(struct perf_event *event,
				      struct event_filter *filter)
{
	struct function_filter_data data = {
		.first_filter = 1,
		.first_notrace = 1,
		.ops = &event->ftrace_ops,
	};

	return walk_pred_tree(filter->preds, filter->root,
			      ftrace_function_set_filter_cb, &data);
}
2266 #else
/* Function tracer not built in: an "ip" filter cannot be honored. */
static int ftrace_function_set_filter(struct perf_event *event,
				      struct event_filter *filter)
{
	return -ENODEV;
}
2272 #endif /* CONFIG_FUNCTION_TRACER */
2273
/*
 * ftrace_profile_set_filter - attach a filter string to a perf event
 *
 * For function-trace events the compiled filter is translated into the
 * event's ftrace filter/notrace lists and then freed; for all other
 * events the filter is stored in event->filter.  Fails with -EEXIST if
 * a filter is already attached.
 */
int ftrace_profile_set_filter(struct perf_event *event, int event_id,
			      char *filter_str)
{
	int err;
	struct event_filter *filter;
	struct trace_event_call *call;

	mutex_lock(&event_mutex);

	call = event->tp_event;

	err = -EINVAL;
	if (!call)
		goto out_unlock;

	err = -EEXIST;
	if (event->filter)
		goto out_unlock;

	err = create_filter(call, filter_str, false, &filter);
	if (err)
		goto free_filter;

	if (ftrace_event_is_function(call))
		err = ftrace_function_set_filter(event, filter);
	else
		event->filter = filter;

free_filter:
	/* function events consumed the filter above; don't keep it */
	if (err || ftrace_event_is_function(call))
		__free_filter(filter);

out_unlock:
	mutex_unlock(&event_mutex);

	return err;
}
2311
2312 #endif /* CONFIG_PERF_EVENTS */
2313
2314 #ifdef CONFIG_FTRACE_STARTUP_TEST
2315
2316 #include <linux/types.h>
2317 #include <linux/tracepoint.h>
2318
2319 #define CREATE_TRACE_POINTS
2320 #include "trace_events_filter_test.h"
2321
/*
 * DATA_REC - construct one test_filter_data_t entry
 * @m:      expected filter_match_preds() result (YES or NO)
 * @va-@vh: values stored in record fields a-h
 * @nvisit: names of fields whose predicates must NOT be evaluated
 *          (checks short-circuiting); "" imposes no constraint
 */
#define DATA_REC(m, va, vb, vc, vd, ve, vf, vg, vh, nvisit) \
{ \
	.filter = FILTER, \
	.rec = { .a = va, .b = vb, .c = vc, .d = vd, \
		.e = ve, .f = vf, .g = vg, .h = vh }, \
	.match = m, \
	.not_visited = nvisit, \
}
#define YES 1
#define NO 0

/* Table of filter strings, sample records, and expected outcomes. */
static struct test_filter_data_t {
	char *filter;	/* filter string to compile */
	struct trace_event_raw_ftrace_test_filter rec;	/* sample record */
	int match;	/* expected match result */
	char *not_visited;	/* fields that must be short-circuited */
} test_filter_data[] = {
#define FILTER "a == 1 && b == 1 && c == 1 && d == 1 && " \
	"e == 1 && f == 1 && g == 1 && h == 1"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, ""),
	DATA_REC(NO, 0, 1, 1, 1, 1, 1, 1, 1, "bcdefgh"),
	DATA_REC(NO, 1, 1, 1, 1, 1, 1, 1, 0, ""),
#undef FILTER
#define FILTER "a == 1 || b == 1 || c == 1 || d == 1 || " \
	"e == 1 || f == 1 || g == 1 || h == 1"
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""),
	DATA_REC(YES, 0, 0, 0, 0, 0, 0, 0, 1, ""),
	DATA_REC(YES, 1, 0, 0, 0, 0, 0, 0, 0, "bcdefgh"),
#undef FILTER
#define FILTER "(a == 1 || b == 1) && (c == 1 || d == 1) && " \
	"(e == 1 || f == 1) && (g == 1 || h == 1)"
	DATA_REC(NO, 0, 0, 1, 1, 1, 1, 1, 1, "dfh"),
	DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(YES, 1, 0, 1, 0, 0, 1, 0, 1, "bd"),
	DATA_REC(NO, 1, 0, 1, 0, 0, 1, 0, 0, "bd"),
#undef FILTER
#define FILTER "(a == 1 && b == 1) || (c == 1 && d == 1) || " \
	"(e == 1 && f == 1) || (g == 1 && h == 1)"
	DATA_REC(YES, 1, 0, 1, 1, 1, 1, 1, 1, "efgh"),
	DATA_REC(YES, 0, 0, 0, 0, 0, 0, 1, 1, ""),
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""),
#undef FILTER
#define FILTER "(a == 1 && b == 1) && (c == 1 && d == 1) && " \
	"(e == 1 && f == 1) || (g == 1 && h == 1)"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 0, "gh"),
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""),
	DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, ""),
#undef FILTER
#define FILTER "((a == 1 || b == 1) || (c == 1 || d == 1) || " \
	"(e == 1 || f == 1)) && (g == 1 || h == 1)"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 1, "bcdef"),
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""),
	DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, "h"),
#undef FILTER
#define FILTER "((((((((a == 1) && (b == 1)) || (c == 1)) && (d == 1)) || " \
	"(e == 1)) && (f == 1)) || (g == 1)) && (h == 1))"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "ceg"),
	DATA_REC(NO, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(NO, 1, 0, 1, 0, 1, 0, 1, 0, ""),
#undef FILTER
#define FILTER "((((((((a == 1) || (b == 1)) && (c == 1)) || (d == 1)) && " \
	"(e == 1)) || (f == 1)) && (g == 1)) || (h == 1))"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "bdfh"),
	DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(YES, 1, 0, 1, 0, 1, 0, 1, 0, "bdfh"),
};

#undef DATA_REC
#undef FILTER
#undef YES
#undef NO
2393
2394 #define DATA_CNT (sizeof(test_filter_data)/sizeof(struct test_filter_data_t))
2395
2396 static int test_pred_visited;
2397
/*
 * Replacement match fn installed on predicates that must NOT run;
 * records the (unwanted) visit so the self-test can detect it.
 */
static int test_pred_visited_fn(struct filter_pred *pred, void *event)
{
	struct ftrace_event_field *field = pred->field;

	test_pred_visited = 1;
	printk(KERN_INFO "\npred visited %s\n", field->name);
	return 1;
}
2406
/*
 * Walk callback: for every leaf predicate whose field name appears in
 * @data (the "not_visited" string), swap in test_pred_visited_fn so an
 * unexpected evaluation (a short-circuit failure) is detected.
 */
static int test_walk_pred_cb(enum move_type move, struct filter_pred *pred,
			     int *err, void *data)
{
	char *fields = data;

	if ((move == MOVE_DOWN) &&
	    (pred->left == FILTER_PRED_INVALID)) {
		struct ftrace_event_field *field = pred->field;

		if (!field) {
			WARN(1, "all leafs should have field defined");
			return WALK_PRED_DEFAULT;
		}
		/* fields are single letters a-h; match by first char */
		if (!strchr(fields, *field->name))
			return WALK_PRED_DEFAULT;

		WARN_ON(!pred->fn);
		pred->fn = test_pred_visited_fn;
	}
	return WALK_PRED_DEFAULT;
}
2428
/*
 * Boot-time self test: compile each table filter, verify the expected
 * match result against the sample record, and verify that predicates
 * listed in not_visited were short-circuited away.
 */
static __init int ftrace_test_event_filter(void)
{
	int i;

	printk(KERN_INFO "Testing ftrace filter: ");

	for (i = 0; i < DATA_CNT; i++) {
		struct event_filter *filter = NULL;
		struct test_filter_data_t *d = &test_filter_data[i];
		int err;

		err = create_filter(&event_ftrace_test_filter, d->filter,
				    false, &filter);
		if (err) {
			printk(KERN_INFO
			       "Failed to get filter for '%s', err %d\n",
			       d->filter, err);
			__free_filter(filter);
			break;
		}

		/*
		 * The preemption disabling is not really needed for self
		 * tests, but the rcu dereference will complain without it.
		 */
		preempt_disable();
		if (*d->not_visited)
			walk_pred_tree(filter->preds, filter->root,
				       test_walk_pred_cb,
				       d->not_visited);

		test_pred_visited = 0;
		err = filter_match_preds(filter, &d->rec);
		preempt_enable();

		__free_filter(filter);

		/* a visited flag means a must-skip predicate ran */
		if (test_pred_visited) {
			printk(KERN_INFO
			       "Failed, unwanted pred visited for filter %s\n",
			       d->filter);
			break;
		}

		if (err != d->match) {
			printk(KERN_INFO
			       "Failed to match filter '%s', expected %d\n",
			       d->filter, d->match);
			break;
		}
	}

	/* loop completed without break: all cases passed */
	if (i == DATA_CNT)
		printk(KERN_CONT "OK\n");

	return 0;
}
2486
2487 late_initcall(ftrace_test_event_filter);
2488
2489 #endif /* CONFIG_FTRACE_STARTUP_TEST */
This page took 0.090751 seconds and 5 git commands to generate.