propogate_mnt: Handle the first propogated copy being a slave
[deliverable/linux.git] / kernel / trace / trace_events_filter.c
1 /*
2 * trace_events_filter - generic event filtering
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
19 */
20
21 #include <linux/module.h>
22 #include <linux/ctype.h>
23 #include <linux/mutex.h>
24 #include <linux/perf_event.h>
25 #include <linux/slab.h>
26
27 #include "trace.h"
28 #include "trace_output.h"
29
/*
 * Placeholder text shown by print_subsystem_event_filter() when no
 * subsystem-wide filter has been set.
 */
#define DEFAULT_SYS_FILTER_MESSAGE					\
	"### global filter ###\n"					\
	"# Use this to set filters for multiple events.\n"		\
	"# Only events with the given fields will be affected.\n"	\
	"# If no events are modified, an error message will be displayed here"
35
/*
 * Operator ids.  The predicate and parser code compare against these
 * values directly, and the filter_ops[] table below must stay in
 * exactly this order.
 */
enum filter_op_ids
{
	OP_OR,
	OP_AND,
	OP_GLOB,
	OP_NE,
	OP_EQ,
	OP_LT,
	OP_LE,
	OP_GT,
	OP_GE,
	OP_BAND,
	OP_NOT,
	OP_NONE,	/* sentinel: "no operator" / table terminator */
	OP_OPEN_PAREN,	/* only ever lives on the parse-time op stack */
};

/* One entry per operator: id, textual form, and parse precedence. */
struct filter_op {
	int id;
	char *string;
	int precedence;	/* higher binds tighter in infix->postfix */
};

/* Order must be the same as enum filter_op_ids above */
static struct filter_op filter_ops[] = {
	{ OP_OR,	"||",		1 },
	{ OP_AND,	"&&",		2 },
	{ OP_GLOB,	"~",		4 },
	{ OP_NE,	"!=",		4 },
	{ OP_EQ,	"==",		4 },
	{ OP_LT,	"<",		5 },
	{ OP_LE,	"<=",		5 },
	{ OP_GT,	">",		5 },
	{ OP_GE,	">=",		5 },
	{ OP_BAND,	"&",		6 },
	{ OP_NOT,	"!",		6 },
	{ OP_NONE,	"OP_NONE",	0 },
	{ OP_OPEN_PAREN, "(",		0 },
};
75
/*
 * Parse error codes.  Each value indexes the matching message in
 * err_text[] below, so the two must stay in the same order.
 */
enum {
	FILT_ERR_NONE,
	FILT_ERR_INVALID_OP,
	FILT_ERR_UNBALANCED_PAREN,
	FILT_ERR_TOO_MANY_OPERANDS,
	FILT_ERR_OPERAND_TOO_LONG,
	FILT_ERR_FIELD_NOT_FOUND,
	FILT_ERR_ILLEGAL_FIELD_OP,
	FILT_ERR_ILLEGAL_INTVAL,
	FILT_ERR_BAD_SUBSYS_FILTER,
	FILT_ERR_TOO_MANY_PREDS,
	FILT_ERR_MISSING_FIELD,
	FILT_ERR_INVALID_FILTER,
	FILT_ERR_IP_FIELD_ONLY,
	FILT_ERR_ILLEGAL_NOT_OP,
};

/* Human-readable messages, indexed by the enum above. */
static char *err_text[] = {
	"No error",
	"Invalid operator",
	"Unbalanced parens",
	"Too many operands",
	"Operand too long",
	"Field not found",
	"Illegal operation for field type",
	"Illegal integer value",
	"Couldn't find or set field in one of a subsystem's events",
	"Too many terms in predicate expression",
	"Missing field name and/or value",
	"Meaningless filter expression",
	"Only 'ip' field is supported for function trace",
	"Illegal use of '!'",
};
109
/* One operator held on the parse-time operator stack. */
struct opstack_op {
	int op;				/* enum filter_op_ids value */
	struct list_head list;
};

/* One element of the postfix (RPN) expression produced by the parser. */
struct postfix_elt {
	int op;				/* OP_NONE when this is an operand */
	char *operand;			/* kstrdup'd operand text, or NULL */
	struct list_head list;
};

/* All transient state for one infix->postfix parse of a filter string. */
struct filter_parse_state {
	struct filter_op *ops;		/* operator table (filter_ops[]) */
	struct list_head opstack;	/* shunting-yard operator stack */
	struct list_head postfix;	/* resulting postfix expression */
	int lasterr;			/* FILT_ERR_* of last failure */
	int lasterr_pos;		/* input offset for the error caret */

	/* Input cursor over the raw infix string. */
	struct {
		char *string;
		unsigned int cnt;	/* characters remaining */
		unsigned int tail;	/* next read position */
	} infix;

	/* Accumulator for the operand currently being read. */
	struct {
		char string[MAX_FILTER_STR_VAL];
		int pos;
		unsigned int tail;	/* write position in string[] */
	} operand;
};

/*
 * Downward-growing stack used while rebuilding the predicate tree
 * from the postfix expression; index == n_preds means empty.
 */
struct pred_stack {
	struct filter_pred **preds;
	int index;
};
145
/* If not of not match is equal to not of not, then it is a match */
/*
 * DEFINE_COMPARISON_PRED - generate the relational predicate for @type.
 * Handles <, <=, >, >=, and & against the constant in pred->val;
 * == and != are covered by the equality predicates below.  The final
 * "!!match == !pred->not" normalizes match to 0/1 and applies the
 * negation flag in one comparison.
 */
#define DEFINE_COMPARISON_PRED(type)					\
static int filter_pred_##type(struct filter_pred *pred, void *event)	\
{									\
	type *addr = (type *)(event + pred->offset);			\
	type val = (type)pred->val;					\
	int match = 0;							\
									\
	switch (pred->op) {						\
	case OP_LT:							\
		match = (*addr < val);					\
		break;							\
	case OP_LE:							\
		match = (*addr <= val);					\
		break;							\
	case OP_GT:							\
		match = (*addr > val);					\
		break;							\
	case OP_GE:							\
		match = (*addr >= val);					\
		break;							\
	case OP_BAND:							\
		match = (*addr & val);					\
		break;							\
	default:							\
		break;							\
	}								\
									\
	return !!match == !pred->not;					\
}

/*
 * DEFINE_EQUALITY_PRED - generate the ==/!= predicate for a @size-bit
 * field.  OP_NE is expressed as OP_EQ with pred->not set (init_pred()
 * flips it), hence the XOR.
 */
#define DEFINE_EQUALITY_PRED(size)					\
static int filter_pred_##size(struct filter_pred *pred, void *event)	\
{									\
	u##size *addr = (u##size *)(event + pred->offset);		\
	u##size val = (u##size)pred->val;				\
	int match;							\
									\
	match = (val == *addr) ^ pred->not;				\
									\
	return match;							\
}

/* Instantiate one comparison predicate per signed/unsigned width. */
DEFINE_COMPARISON_PRED(s64);
DEFINE_COMPARISON_PRED(u64);
DEFINE_COMPARISON_PRED(s32);
DEFINE_COMPARISON_PRED(u32);
DEFINE_COMPARISON_PRED(s16);
DEFINE_COMPARISON_PRED(u16);
DEFINE_COMPARISON_PRED(s8);
DEFINE_COMPARISON_PRED(u8);

/* And one equality predicate per width (sign-agnostic). */
DEFINE_EQUALITY_PRED(64);
DEFINE_EQUALITY_PRED(32);
DEFINE_EQUALITY_PRED(16);
DEFINE_EQUALITY_PRED(8);
202
203 /* Filter predicate for fixed sized arrays of characters */
204 static int filter_pred_string(struct filter_pred *pred, void *event)
205 {
206 char *addr = (char *)(event + pred->offset);
207 int cmp, match;
208
209 cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len);
210
211 match = cmp ^ pred->not;
212
213 return match;
214 }
215
/* Filter predicate for char * pointers */
static int filter_pred_pchar(struct filter_pred *pred, void *event)
{
	/* The event field stores a pointer to the string, not the string. */
	char **addr = (char **)(event + pred->offset);
	int cmp, match;
	/*
	 * NOTE(review): assumes *addr is a valid NUL-terminated string;
	 * a NULL pointer here would crash strlen().  Confirm all pchar
	 * fields are always populated by the events that record them.
	 */
	int len = strlen(*addr) + 1;	/* including tailing '\0' */

	cmp = pred->regex.match(*addr, &pred->regex, len);

	/* pred->not inverts the result for negated filters. */
	match = cmp ^ pred->not;

	return match;
}
229
/*
 * Filter predicate for dynamic sized arrays of characters.
 * These are implemented through a list of strings at the end
 * of the entry.
 * Also each of these strings have a field in the entry which
 * contains its offset from the beginning of the entry.
 * We have then first to get this field, dereference it
 * and add it to the address of the entry, and at last we have
 * the address of the string.
 */
static int filter_pred_strloc(struct filter_pred *pred, void *event)
{
	/* __data_loc word: low 16 bits = offset, high 16 bits = length. */
	u32 str_item = *(u32 *)(event + pred->offset);
	int str_loc = str_item & 0xffff;
	int str_len = str_item >> 16;
	char *addr = (char *)(event + str_loc);
	int cmp, match;

	cmp = pred->regex.match(addr, &pred->regex, str_len);

	/* pred->not inverts the result for negated filters. */
	match = cmp ^ pred->not;

	return match;
}
254
255 /* Filter predicate for CPUs. */
256 static int filter_pred_cpu(struct filter_pred *pred, void *event)
257 {
258 int cpu, cmp;
259 int match = 0;
260
261 cpu = raw_smp_processor_id();
262 cmp = pred->val;
263
264 switch (pred->op) {
265 case OP_EQ:
266 match = cpu == cmp;
267 break;
268 case OP_LT:
269 match = cpu < cmp;
270 break;
271 case OP_LE:
272 match = cpu <= cmp;
273 break;
274 case OP_GT:
275 match = cpu > cmp;
276 break;
277 case OP_GE:
278 match = cpu >= cmp;
279 break;
280 default:
281 break;
282 }
283
284 return !!match == !pred->not;
285 }
286
287 /* Filter predicate for COMM. */
288 static int filter_pred_comm(struct filter_pred *pred, void *event)
289 {
290 int cmp, match;
291
292 cmp = pred->regex.match(current->comm, &pred->regex,
293 pred->regex.field_len);
294 match = cmp ^ pred->not;
295
296 return match;
297 }
298
/*
 * Placeholder predicate that never matches.  Installed in every freshly
 * allocated pred slot (see __alloc_preds()) so unused slots are harmless.
 */
static int filter_pred_none(struct filter_pred *pred, void *event)
{
	return 0;
}
303
304 /*
305 * regex_match_foo - Basic regex callbacks
306 *
307 * @str: the string to be searched
308 * @r: the regex structure containing the pattern string
309 * @len: the length of the string to be searched (including '\0')
310 *
311 * Note:
312 * - @str might not be NULL-terminated if it's of type DYN_STRING
313 * or STATIC_STRING
314 */
315
316 static int regex_match_full(char *str, struct regex *r, int len)
317 {
318 if (strncmp(str, r->pattern, len) == 0)
319 return 1;
320 return 0;
321 }
322
323 static int regex_match_front(char *str, struct regex *r, int len)
324 {
325 if (strncmp(str, r->pattern, r->len) == 0)
326 return 1;
327 return 0;
328 }
329
330 static int regex_match_middle(char *str, struct regex *r, int len)
331 {
332 if (strnstr(str, r->pattern, len))
333 return 1;
334 return 0;
335 }
336
337 static int regex_match_end(char *str, struct regex *r, int len)
338 {
339 int strlen = len - 1;
340
341 if (strlen >= r->len &&
342 memcmp(str + strlen - r->len, r->pattern, r->len) == 0)
343 return 1;
344 return 0;
345 }
346
347 /**
348 * filter_parse_regex - parse a basic regex
349 * @buff: the raw regex
350 * @len: length of the regex
351 * @search: will point to the beginning of the string to compare
352 * @not: tell whether the match will have to be inverted
353 *
354 * This passes in a buffer containing a regex and this function will
355 * set search to point to the search part of the buffer and
356 * return the type of search it is (see enum above).
357 * This does modify buff.
358 *
359 * Returns enum type.
360 * search returns the pointer to use for comparison.
361 * not returns 1 if buff started with a '!'
362 * 0 otherwise.
363 */
enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
{
	int type = MATCH_FULL;
	int i;

	/* A leading '!' negates the whole match and is stripped. */
	if (buff[0] == '!') {
		*not = 1;
		buff++;
		len--;
	} else
		*not = 0;

	*search = buff;

	/*
	 * Classify by the position of the first '*':
	 *   "*foo"  -> MATCH_END_ONLY    (search starts after the '*')
	 *   "*foo*" -> MATCH_MIDDLE_ONLY
	 *   "foo*"  -> MATCH_FRONT_ONLY  (the '*' is overwritten by NUL)
	 * No '*' at all leaves MATCH_FULL.
	 */
	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				*search = buff + 1;
				type = MATCH_END_ONLY;
			} else {
				if (type == MATCH_END_ONLY)
					type = MATCH_MIDDLE_ONLY;
				else
					type = MATCH_FRONT_ONLY;
				buff[i] = 0;
				break;
			}
		}
	}

	return type;
}
396
/*
 * Pick the regex callback for @pred.  Only OP_GLOB patterns are parsed
 * for '*' wildcards; every other string op keeps MATCH_FULL.  The
 * stripped pattern is compacted in place and pred->not absorbs a
 * leading '!' from the pattern.
 */
static void filter_build_regex(struct filter_pred *pred)
{
	struct regex *r = &pred->regex;
	char *search;
	enum regex_type type = MATCH_FULL;
	int not = 0;

	if (pred->op == OP_GLOB) {
		type = filter_parse_regex(r->pattern, r->len, &search, &not);
		/* Re-anchor the pattern at the real search text. */
		r->len = strlen(search);
		memmove(r->pattern, search, r->len+1);
	}

	switch (type) {
	case MATCH_FULL:
		r->match = regex_match_full;
		break;
	case MATCH_FRONT_ONLY:
		r->match = regex_match_front;
		break;
	case MATCH_MIDDLE_ONLY:
		r->match = regex_match_middle;
		break;
	case MATCH_END_ONLY:
		r->match = regex_match_end;
		break;
	}

	pred->not ^= not;
}
427
/* Direction of the current step in the iterative tree walk below. */
enum move_type {
	MOVE_DOWN,		/* descending into a child */
	MOVE_UP_FROM_LEFT,	/* left subtree finished */
	MOVE_UP_FROM_RIGHT	/* right subtree finished */
};
433
/*
 * Climb one level: return @pred's parent and record, via *move, which
 * side we came up from (encoded in the FILTER_PRED_IS_RIGHT bit of
 * pred->parent).  The @index parameter is unused; callers pass
 * pred->parent, which is read directly here.
 */
static struct filter_pred *
get_pred_parent(struct filter_pred *pred, struct filter_pred *preds,
		int index, enum move_type *move)
{
	if (pred->parent & FILTER_PRED_IS_RIGHT)
		*move = MOVE_UP_FROM_RIGHT;
	else
		*move = MOVE_UP_FROM_LEFT;
	pred = &preds[pred->parent & ~FILTER_PRED_IS_RIGHT];

	return pred;
}
446
/* Callback verdicts for walk_pred_tree(). */
enum walk_return {
	WALK_PRED_ABORT,	/* stop the walk; return callback's *err */
	WALK_PRED_PARENT,	/* skip children, climb to the parent */
	WALK_PRED_DEFAULT,	/* continue normally */
};

/* Visitor invoked at every step of the walk, in direction @move. */
typedef int (*filter_pred_walkcb_t) (enum move_type move,
				     struct filter_pred *pred,
				     int *err, void *data);
456
/*
 * walk_pred_tree - iteratively visit every node of a predicate tree.
 * @preds: flat array the tree's child/parent indexes point into
 * @root:  node to start from
 * @cb:    visitor, called at each step with the move direction
 * @data:  opaque cookie passed through to @cb
 *
 * Non-recursive depth-first traversal.  @cb may return WALK_PRED_ABORT
 * to stop immediately (its *err is returned) or WALK_PRED_PARENT to
 * skip the current node's children.
 */
static int walk_pred_tree(struct filter_pred *preds,
			  struct filter_pred *root,
			  filter_pred_walkcb_t cb, void *data)
{
	struct filter_pred *pred = root;
	enum move_type move = MOVE_DOWN;
	int done = 0;

	if (!preds)
		return -EINVAL;

	do {
		int err = 0, ret;

		ret = cb(move, pred, &err, data);
		if (ret == WALK_PRED_ABORT)
			return err;
		if (ret == WALK_PRED_PARENT)
			goto get_parent;

		switch (move) {
		case MOVE_DOWN:
			/* Leaves are marked by an invalid left child. */
			if (pred->left != FILTER_PRED_INVALID) {
				pred = &preds[pred->left];
				continue;
			}
			goto get_parent;
		case MOVE_UP_FROM_LEFT:
			/* Left subtree done; descend into the right one. */
			pred = &preds[pred->right];
			move = MOVE_DOWN;
			continue;
		case MOVE_UP_FROM_RIGHT:
 get_parent:
			/* Both subtrees done; climb, or finish at root. */
			if (pred == root)
				break;
			pred = get_pred_parent(pred, preds,
					       pred->parent,
					       &move);
			continue;
		}
		done = 1;
	} while (!done);

	/* We are fine. */
	return 0;
}
503
/*
 * A series of AND or ORs where found together. Instead of
 * climbing up and down the tree branches, an array of the
 * ops were made in order of checks. We can just move across
 * the array and short circuit if needed.
 */
static int process_ops(struct filter_pred *preds,
		       struct filter_pred *op, void *rec)
{
	struct filter_pred *pred;
	int match = 0;
	int type;
	int i;

	/*
	 * Micro-optimization: We set type to true if op
	 * is an OR and false otherwise (AND). Then we
	 * just need to test if the match is equal to
	 * the type, and if it is, we can short circuit the
	 * rest of the checks:
	 *
	 * if ((match && op->op == OP_OR) ||
	 *     (!match && op->op == OP_AND))
	 *	return match;
	 */
	type = op->op == OP_OR;

	/* op->val holds the number of folded children in op->ops[]. */
	for (i = 0; i < op->val; i++) {
		pred = &preds[op->ops[i]];
		if (!WARN_ON_ONCE(!pred->fn))
			match = pred->fn(pred, rec);
		if (!!match == type)
			break;
	}
	/* If not of not match is equal to not of not, then it is a match */
	return !!match == !op->not;
}
541
/* Walk context for filter_match_preds(): inputs and running verdict. */
struct filter_match_preds_data {
	struct filter_pred *preds;	/* flat predicate array */
	int match;			/* result of the last leaf/subtree */
	void *rec;			/* raw event record being tested */
};
547
/*
 * Tree-walk visitor that evaluates the filter against d->rec.
 * Leaves (and folded subtrees) produce d->match; logical nodes use
 * the short-circuit rule on the way back up.
 */
static int filter_match_preds_cb(enum move_type move, struct filter_pred *pred,
				 int *err, void *data)
{
	struct filter_match_preds_data *d = data;

	*err = 0;
	switch (move) {
	case MOVE_DOWN:
		/* only AND and OR have children */
		if (pred->left != FILTER_PRED_INVALID) {
			/* If ops is set, then it was folded. */
			if (!pred->ops)
				return WALK_PRED_DEFAULT;
			/* We can treat folded ops as a leaf node */
			d->match = process_ops(d->preds, pred, d->rec);
		} else {
			if (!WARN_ON_ONCE(!pred->fn))
				d->match = pred->fn(pred, d->rec);
		}

		return WALK_PRED_PARENT;
	case MOVE_UP_FROM_LEFT:
		/*
		 * Check for short circuits.
		 *
		 * Optimization: !!match == (pred->op == OP_OR)
		 *   is the same as:
		 * if ((match && pred->op == OP_OR) ||
		 *     (!match && pred->op == OP_AND))
		 */
		if (!!d->match == (pred->op == OP_OR))
			return WALK_PRED_PARENT;
		break;
	case MOVE_UP_FROM_RIGHT:
		/* Right child's result is already in d->match. */
		break;
	}

	return WALK_PRED_DEFAULT;
}
587
/* return 1 if event matches, 0 otherwise (discard) */
int filter_match_preds(struct event_filter *filter, void *rec)
{
	struct filter_pred *preds;
	struct filter_pred *root;
	struct filter_match_preds_data data = {
		/* match is currently meaningless */
		.match = -1,
		.rec   = rec,
	};
	int n_preds, ret;

	/* no filter is considered a match */
	if (!filter)
		return 1;

	n_preds = filter->n_preds;
	if (!n_preds)
		return 1;

	/*
	 * n_preds, root and filter->preds are protect with preemption disabled.
	 */
	root = rcu_dereference_sched(filter->root);
	if (!root)
		return 1;

	data.preds = preds = rcu_dereference_sched(filter->preds);
	ret = walk_pred_tree(preds, root, filter_match_preds_cb, &data);
	/* The walk only fails if preds is NULL, checked above. */
	WARN_ON(ret);
	return data.match;
}
EXPORT_SYMBOL_GPL(filter_match_preds);
621
/* Record a FILT_ERR_* code and input position for later reporting. */
static void parse_error(struct filter_parse_state *ps, int err, int pos)
{
	ps->lasterr = err;
	ps->lasterr_pos = pos;
}
627
628 static void remove_filter_string(struct event_filter *filter)
629 {
630 if (!filter)
631 return;
632
633 kfree(filter->filter_string);
634 filter->filter_string = NULL;
635 }
636
637 static int replace_filter_string(struct event_filter *filter,
638 char *filter_string)
639 {
640 kfree(filter->filter_string);
641 filter->filter_string = kstrdup(filter_string, GFP_KERNEL);
642 if (!filter->filter_string)
643 return -ENOMEM;
644
645 return 0;
646 }
647
648 static int append_filter_string(struct event_filter *filter,
649 char *string)
650 {
651 int newlen;
652 char *new_filter_string;
653
654 BUG_ON(!filter->filter_string);
655 newlen = strlen(filter->filter_string) + strlen(string) + 1;
656 new_filter_string = kmalloc(newlen, GFP_KERNEL);
657 if (!new_filter_string)
658 return -ENOMEM;
659
660 strcpy(new_filter_string, filter->filter_string);
661 strcat(new_filter_string, string);
662 kfree(filter->filter_string);
663 filter->filter_string = new_filter_string;
664
665 return 0;
666 }
667
/*
 * Append a two-line error annotation to the filter string: a row of
 * spaces with a '^' under the offending input position, then the
 * matching err_text[] message.
 */
static void append_filter_err(struct filter_parse_state *ps,
			      struct event_filter *filter)
{
	int pos = ps->lasterr_pos;
	char *buf, *pbuf;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return;

	append_filter_string(filter, "\n");
	/* Space-fill so the caret lines up under column 'pos'. */
	memset(buf, ' ', PAGE_SIZE);
	/* Keep the caret plus message safely inside the page. */
	if (pos > PAGE_SIZE - 128)
		pos = 0;
	buf[pos] = '^';
	pbuf = &buf[pos] + 1;

	sprintf(pbuf, "\nparse_error: %s\n", err_text[ps->lasterr]);
	/*
	 * NOTE(review): append_filter_string() failures (-ENOMEM) are
	 * ignored here - the annotation is best effort only.
	 */
	append_filter_string(filter, buf);
	free_page((unsigned long) buf);
}
689
690 static inline struct event_filter *event_filter(struct trace_event_file *file)
691 {
692 if (file->event_call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
693 return file->event_call->filter;
694 else
695 return file->filter;
696 }
697
698 /* caller must hold event_mutex */
699 void print_event_filter(struct trace_event_file *file, struct trace_seq *s)
700 {
701 struct event_filter *filter = event_filter(file);
702
703 if (filter && filter->filter_string)
704 trace_seq_printf(s, "%s\n", filter->filter_string);
705 else
706 trace_seq_puts(s, "none\n");
707 }
708
709 void print_subsystem_event_filter(struct event_subsystem *system,
710 struct trace_seq *s)
711 {
712 struct event_filter *filter;
713
714 mutex_lock(&event_mutex);
715 filter = system->filter;
716 if (filter && filter->filter_string)
717 trace_seq_printf(s, "%s\n", filter->filter_string);
718 else
719 trace_seq_puts(s, DEFAULT_SYS_FILTER_MESSAGE "\n");
720 mutex_unlock(&event_mutex);
721 }
722
/*
 * Allocate a downward-growing stack of @n_preds slots.  kcalloc zeroes
 * the array, and the extra slot keeps a NULL sentinel below the last
 * usable entry (used by __pop_pred_stack() to detect emptiness).
 */
static int __alloc_pred_stack(struct pred_stack *stack, int n_preds)
{
	stack->preds = kcalloc(n_preds + 1, sizeof(*stack->preds), GFP_KERNEL);
	if (!stack->preds)
		return -ENOMEM;
	/* index == n_preds means the stack is empty. */
	stack->index = n_preds;
	return 0;
}
731
/* Release the slot array of a pred stack. */
static void __free_pred_stack(struct pred_stack *stack)
{
	kfree(stack->preds);
	stack->index = 0;
}
737
/* Push @pred; the stack grows downward from n_preds toward 0. */
static int __push_pred_stack(struct pred_stack *stack,
			     struct filter_pred *pred)
{
	int index = stack->index;

	/* index == 0 would clobber the NULL bottom-of-stack sentinel. */
	if (WARN_ON(index == 0))
		return -ENOSPC;

	stack->preds[--index] = pred;
	stack->index = index;
	return 0;
}
750
/*
 * Pop the top pred, or return NULL when the NULL sentinel is reached
 * (empty stack).  On empty, stack->index is deliberately left alone.
 */
static struct filter_pred *
__pop_pred_stack(struct pred_stack *stack)
{
	struct filter_pred *pred;
	int index = stack->index;

	pred = stack->preds[index++];
	if (!pred)
		return NULL;

	stack->index = index;
	return pred;
}
764
/*
 * filter_set_pred - copy @src into slot @idx of the filter's pred array
 * and wire it into the tree being built bottom-up from the postfix
 * expression: leaves are pushed on @stack, and when a logical op
 * (&&/||) arrives its two children are popped and linked.  The
 * FILTER_PRED_FOLD bit is carried in the index values to mark subtrees
 * that can later be flattened for process_ops().
 */
static int filter_set_pred(struct event_filter *filter,
			   int idx,
			   struct pred_stack *stack,
			   struct filter_pred *src)
{
	struct filter_pred *dest = &filter->preds[idx];
	struct filter_pred *left;
	struct filter_pred *right;

	*dest = *src;
	dest->index = idx;

	if (dest->op == OP_OR || dest->op == OP_AND) {
		right = __pop_pred_stack(stack);
		left = __pop_pred_stack(stack);
		if (!left || !right)
			return -EINVAL;
		/*
		 * If both children can be folded
		 * and they are the same op as this op or a leaf,
		 * then this op can be folded.
		 */
		if (left->index & FILTER_PRED_FOLD &&
		    ((left->op == dest->op && !left->not) ||
		     left->left == FILTER_PRED_INVALID) &&
		    right->index & FILTER_PRED_FOLD &&
		    ((right->op == dest->op && !right->not) ||
		     right->left == FILTER_PRED_INVALID))
			dest->index |= FILTER_PRED_FOLD;

		/* Child links carry the bare index, without the FOLD bit. */
		dest->left = left->index & ~FILTER_PRED_FOLD;
		dest->right = right->index & ~FILTER_PRED_FOLD;
		left->parent = dest->index & ~FILTER_PRED_FOLD;
		/* The right child records which side it hangs on. */
		right->parent = dest->index | FILTER_PRED_IS_RIGHT;
	} else {
		/*
		 * Make dest->left invalid to be used as a quick
		 * way to know this is a leaf node.
		 */
		dest->left = FILTER_PRED_INVALID;

		/* All leafs allow folding the parent ops. */
		dest->index |= FILTER_PRED_FOLD;
	}

	return __push_pred_stack(stack, dest);
}
812
/* Free the predicate array and any per-pred folded-ops arrays. */
static void __free_preds(struct event_filter *filter)
{
	int i;

	if (filter->preds) {
		/* Each pred may own a folded children array (pred->ops). */
		for (i = 0; i < filter->n_preds; i++)
			kfree(filter->preds[i].ops);
		kfree(filter->preds);
		filter->preds = NULL;
	}
	filter->a_preds = 0;
	filter->n_preds = 0;
}
826
/* Clear the FILTERED flag wherever this event's filter actually lives. */
static void filter_disable(struct trace_event_file *file)
{
	struct trace_event_call *call = file->event_call;

	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
		call->flags &= ~TRACE_EVENT_FL_FILTERED;
	else
		file->flags &= ~EVENT_FILE_FL_FILTERED;
}
836
/* Free a filter, its predicate array, and its saved text.  NULL-safe. */
static void __free_filter(struct event_filter *filter)
{
	if (!filter)
		return;

	__free_preds(filter);
	kfree(filter->filter_string);
	kfree(filter);
}
846
/* Public wrapper around __free_filter() for code outside this file. */
void free_event_filter(struct event_filter *filter)
{
	__free_filter(filter);
}
851
852 static struct event_filter *__alloc_filter(void)
853 {
854 struct event_filter *filter;
855
856 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
857 return filter;
858 }
859
860 static int __alloc_preds(struct event_filter *filter, int n_preds)
861 {
862 struct filter_pred *pred;
863 int i;
864
865 if (filter->preds)
866 __free_preds(filter);
867
868 filter->preds = kcalloc(n_preds, sizeof(*filter->preds), GFP_KERNEL);
869
870 if (!filter->preds)
871 return -ENOMEM;
872
873 filter->a_preds = n_preds;
874 filter->n_preds = 0;
875
876 for (i = 0; i < n_preds; i++) {
877 pred = &filter->preds[i];
878 pred->fn = filter_pred_none;
879 }
880
881 return 0;
882 }
883
884 static inline void __remove_filter(struct trace_event_file *file)
885 {
886 struct trace_event_call *call = file->event_call;
887
888 filter_disable(file);
889 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
890 remove_filter_string(call->filter);
891 else
892 remove_filter_string(file->filter);
893 }
894
895 static void filter_free_subsystem_preds(struct trace_subsystem_dir *dir,
896 struct trace_array *tr)
897 {
898 struct trace_event_file *file;
899
900 list_for_each_entry(file, &tr->events, list) {
901 if (file->system != dir)
902 continue;
903 __remove_filter(file);
904 }
905 }
906
907 static inline void __free_subsystem_filter(struct trace_event_file *file)
908 {
909 struct trace_event_call *call = file->event_call;
910
911 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) {
912 __free_filter(call->filter);
913 call->filter = NULL;
914 } else {
915 __free_filter(file->filter);
916 file->filter = NULL;
917 }
918 }
919
920 static void filter_free_subsystem_filters(struct trace_subsystem_dir *dir,
921 struct trace_array *tr)
922 {
923 struct trace_event_file *file;
924
925 list_for_each_entry(file, &tr->events, list) {
926 if (file->system != dir)
927 continue;
928 __free_subsystem_filter(file);
929 }
930 }
931
/*
 * Append @pred to the filter's pred array (slot n_preds) and wire it
 * into the tree via filter_set_pred().  Fails with -ENOSPC if the
 * array sized by count_preds() is already full.
 */
static int filter_add_pred(struct filter_parse_state *ps,
			   struct event_filter *filter,
			   struct filter_pred *pred,
			   struct pred_stack *stack)
{
	int err;

	if (WARN_ON(filter->n_preds == filter->a_preds)) {
		parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
		return -ENOSPC;
	}

	err = filter_set_pred(filter, filter->n_preds, stack, pred);
	if (err)
		return err;

	filter->n_preds++;

	return 0;
}
952
953 int filter_assign_type(const char *type)
954 {
955 if (strstr(type, "__data_loc") && strstr(type, "char"))
956 return FILTER_DYN_STRING;
957
958 if (strchr(type, '[') && strstr(type, "char"))
959 return FILTER_STATIC_STRING;
960
961 return FILTER_OTHER;
962 }
963
964 static bool is_legal_op(struct ftrace_event_field *field, int op)
965 {
966 if (is_string_field(field) &&
967 (op != OP_EQ && op != OP_NE && op != OP_GLOB))
968 return false;
969 if (!is_string_field(field) && op == OP_GLOB)
970 return false;
971
972 return true;
973 }
974
/*
 * Pick the numeric predicate for a field of @field_size bytes.
 * ==/!= use the sign-agnostic equality preds; the relational ops use
 * the signed or unsigned comparison pred.  Returns NULL for an
 * unsupported field size, which init_pred() reports as an error.
 */
static filter_pred_fn_t select_comparison_fn(int op, int field_size,
					     int field_is_signed)
{
	filter_pred_fn_t fn = NULL;

	switch (field_size) {
	case 8:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_64;
		else if (field_is_signed)
			fn = filter_pred_s64;
		else
			fn = filter_pred_u64;
		break;
	case 4:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_32;
		else if (field_is_signed)
			fn = filter_pred_s32;
		else
			fn = filter_pred_u32;
		break;
	case 2:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_16;
		else if (field_is_signed)
			fn = filter_pred_s16;
		else
			fn = filter_pred_u16;
		break;
	case 1:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_8;
		else if (field_is_signed)
			fn = filter_pred_s8;
		else
			fn = filter_pred_u8;
		break;
	}

	return fn;
}
1017
/*
 * init_pred - finish setting up a leaf predicate for @field.
 *
 * Validates the operator for the field's type, parses the operand
 * (regex pattern for strings, integer for scalars) and installs the
 * matching predicate callback.  OP_NE is rewritten as its positive
 * counterpart plus the pred->not flag.  Returns 0 or -EINVAL (with
 * the parse error recorded in @ps).
 */
static int init_pred(struct filter_parse_state *ps,
		     struct ftrace_event_field *field,
		     struct filter_pred *pred)

{
	filter_pred_fn_t fn = filter_pred_none;
	unsigned long long val;
	int ret;

	pred->offset = field->offset;

	if (!is_legal_op(field, pred->op)) {
		parse_error(ps, FILT_ERR_ILLEGAL_FIELD_OP, 0);
		return -EINVAL;
	}

	if (field->filter_type == FILTER_COMM) {
		/* COMM matches against current->comm, not event data. */
		filter_build_regex(pred);
		fn = filter_pred_comm;
		pred->regex.field_len = TASK_COMM_LEN;
	} else if (is_string_field(field)) {
		filter_build_regex(pred);

		if (field->filter_type == FILTER_STATIC_STRING) {
			fn = filter_pred_string;
			pred->regex.field_len = field->size;
		} else if (field->filter_type == FILTER_DYN_STRING)
			fn = filter_pred_strloc;
		else
			fn = filter_pred_pchar;
	} else if (is_function_field(field)) {
		/* Function events only support filtering on 'ip'. */
		if (strcmp(field->name, "ip")) {
			parse_error(ps, FILT_ERR_IP_FIELD_ONLY, 0);
			return -EINVAL;
		}
	} else {
		/* Scalar field: the operand must parse as an integer. */
		if (field->is_signed)
			ret = kstrtoll(pred->regex.pattern, 0, &val);
		else
			ret = kstrtoull(pred->regex.pattern, 0, &val);
		if (ret) {
			parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
			return -EINVAL;
		}
		pred->val = val;

		if (field->filter_type == FILTER_CPU)
			fn = filter_pred_cpu;
		else
			fn = select_comparison_fn(pred->op, field->size,
						  field->is_signed);
		if (!fn) {
			parse_error(ps, FILT_ERR_INVALID_OP, 0);
			return -EINVAL;
		}
	}

	/* != is implemented as == with the result negated. */
	if (pred->op == OP_NE)
		pred->not ^= 1;

	pred->fn = fn;
	return 0;
}
1081
1082 static void parse_init(struct filter_parse_state *ps,
1083 struct filter_op *ops,
1084 char *infix_string)
1085 {
1086 memset(ps, '\0', sizeof(*ps));
1087
1088 ps->infix.string = infix_string;
1089 ps->infix.cnt = strlen(infix_string);
1090 ps->ops = ops;
1091
1092 INIT_LIST_HEAD(&ps->opstack);
1093 INIT_LIST_HEAD(&ps->postfix);
1094 }
1095
/* Consume and return the next input character; '\0' when exhausted. */
static char infix_next(struct filter_parse_state *ps)
{
	if (!ps->infix.cnt)
		return 0;

	ps->infix.cnt--;

	return ps->infix.string[ps->infix.tail++];
}
1105
/*
 * Look at the next input character without consuming it; '\0' at end.
 * (Calls strlen() on every peek - harmless here since filter strings
 * are short and parsed once.)
 */
static char infix_peek(struct filter_parse_state *ps)
{
	if (ps->infix.tail == strlen(ps->infix.string))
		return 0;

	return ps->infix.string[ps->infix.tail];
}
1113
1114 static void infix_advance(struct filter_parse_state *ps)
1115 {
1116 if (!ps->infix.cnt)
1117 return;
1118
1119 ps->infix.cnt--;
1120 ps->infix.tail++;
1121 }
1122
/* True if operator @a binds less tightly than operator @b. */
static inline int is_precedence_lower(struct filter_parse_state *ps,
				      int a, int b)
{
	return ps->ops[a].precedence < ps->ops[b].precedence;
}
1128
/*
 * True if @c can start an operator.  The table scan stops at the
 * "OP_NONE" entry, which acts as the terminator of filter_ops[].
 */
static inline int is_op_char(struct filter_parse_state *ps, char c)
{
	int i;

	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
		if (ps->ops[i].string[0] == c)
			return 1;
	}

	return 0;
}
1140
/*
 * Resolve the operator starting with @firstc.  Two-character operators
 * (">=", "&&", ...) are tried first using the peeked next character
 * (which is consumed on a hit); then single-character ones ("<", "&").
 * Returns the op id, or OP_NONE if nothing matches.
 */
static int infix_get_op(struct filter_parse_state *ps, char firstc)
{
	char nextc = infix_peek(ps);
	char opstr[3];
	int i;

	opstr[0] = firstc;
	opstr[1] = nextc;
	opstr[2] = '\0';

	/* Two-character candidates first ("<=" must beat "<"). */
	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
		if (!strcmp(opstr, ps->ops[i].string)) {
			infix_advance(ps);
			return ps->ops[i].id;
		}
	}

	opstr[1] = '\0';

	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
		if (!strcmp(opstr, ps->ops[i].string))
			return ps->ops[i].id;
	}

	return OP_NONE;
}
1167
1168 static inline void clear_operand_string(struct filter_parse_state *ps)
1169 {
1170 memset(ps->operand.string, '\0', MAX_FILTER_STR_VAL);
1171 ps->operand.tail = 0;
1172 }
1173
1174 static inline int append_operand_char(struct filter_parse_state *ps, char c)
1175 {
1176 if (ps->operand.tail == MAX_FILTER_STR_VAL - 1)
1177 return -EINVAL;
1178
1179 ps->operand.string[ps->operand.tail++] = c;
1180
1181 return 0;
1182 }
1183
1184 static int filter_opstack_push(struct filter_parse_state *ps, int op)
1185 {
1186 struct opstack_op *opstack_op;
1187
1188 opstack_op = kmalloc(sizeof(*opstack_op), GFP_KERNEL);
1189 if (!opstack_op)
1190 return -ENOMEM;
1191
1192 opstack_op->op = op;
1193 list_add(&opstack_op->list, &ps->opstack);
1194
1195 return 0;
1196 }
1197
/* True if the operator stack holds nothing. */
static int filter_opstack_empty(struct filter_parse_state *ps)
{
	return list_empty(&ps->opstack);
}
1202
/* Return the top operator without popping it; OP_NONE when empty. */
static int filter_opstack_top(struct filter_parse_state *ps)
{
	struct opstack_op *opstack_op;

	if (filter_opstack_empty(ps))
		return OP_NONE;

	opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);

	return opstack_op->op;
}
1214
/* Pop and return the top operator (freeing its node); OP_NONE if empty. */
static int filter_opstack_pop(struct filter_parse_state *ps)
{
	struct opstack_op *opstack_op;
	int op;

	if (filter_opstack_empty(ps))
		return OP_NONE;

	opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
	op = opstack_op->op;
	list_del(&opstack_op->list);

	kfree(opstack_op);

	return op;
}
1231
/* Pop (and free) every remaining operator, e.g. after a parse error. */
static void filter_opstack_clear(struct filter_parse_state *ps)
{
	while (!filter_opstack_empty(ps))
		filter_opstack_pop(ps);
}
1237
/* The operand text accumulated so far (empty string if none). */
static char *curr_operand(struct filter_parse_state *ps)
{
	return ps->operand.string;
}
1242
1243 static int postfix_append_operand(struct filter_parse_state *ps, char *operand)
1244 {
1245 struct postfix_elt *elt;
1246
1247 elt = kmalloc(sizeof(*elt), GFP_KERNEL);
1248 if (!elt)
1249 return -ENOMEM;
1250
1251 elt->op = OP_NONE;
1252 elt->operand = kstrdup(operand, GFP_KERNEL);
1253 if (!elt->operand) {
1254 kfree(elt);
1255 return -ENOMEM;
1256 }
1257
1258 list_add_tail(&elt->list, &ps->postfix);
1259
1260 return 0;
1261 }
1262
1263 static int postfix_append_op(struct filter_parse_state *ps, int op)
1264 {
1265 struct postfix_elt *elt;
1266
1267 elt = kmalloc(sizeof(*elt), GFP_KERNEL);
1268 if (!elt)
1269 return -ENOMEM;
1270
1271 elt->op = op;
1272 elt->operand = NULL;
1273
1274 list_add_tail(&elt->list, &ps->postfix);
1275
1276 return 0;
1277 }
1278
1279 static void postfix_clear(struct filter_parse_state *ps)
1280 {
1281 struct postfix_elt *elt;
1282
1283 while (!list_empty(&ps->postfix)) {
1284 elt = list_first_entry(&ps->postfix, struct postfix_elt, list);
1285 list_del(&elt->list);
1286 kfree(elt->operand);
1287 kfree(elt);
1288 }
1289 }
1290
/*
 * filter_parse - convert the infix filter string in @ps into a postfix
 * expression on ps->postfix using the shunting-yard algorithm.
 *
 * Double quotes toggle string mode (quoted text is copied verbatim
 * into the current operand, quotes excluded).  Parse failures are
 * recorded via parse_error() and -EINVAL returned.
 *
 * NOTE(review): the return values of postfix_append_operand(),
 * postfix_append_op() and filter_opstack_push() (-ENOMEM) are ignored
 * throughout; an allocation failure here silently drops part of the
 * expression - verify callers can tolerate that.
 */
static int filter_parse(struct filter_parse_state *ps)
{
	int in_string = 0;
	int op, top_op;
	char ch;

	while ((ch = infix_next(ps))) {
		if (ch == '"') {
			in_string ^= 1;
			continue;
		}

		/* Inside quotes, everything is operand text. */
		if (in_string)
			goto parse_operand;

		if (isspace(ch))
			continue;

		if (is_op_char(ps, ch)) {
			op = infix_get_op(ps, ch);
			if (op == OP_NONE) {
				parse_error(ps, FILT_ERR_INVALID_OP, 0);
				return -EINVAL;
			}

			/* An operator terminates the pending operand. */
			if (strlen(curr_operand(ps))) {
				postfix_append_operand(ps, curr_operand(ps));
				clear_operand_string(ps);
			}

			/*
			 * Flush stacked operators of higher-or-equal
			 * precedence before pushing the new one.
			 */
			while (!filter_opstack_empty(ps)) {
				top_op = filter_opstack_top(ps);
				if (!is_precedence_lower(ps, top_op, op)) {
					top_op = filter_opstack_pop(ps);
					postfix_append_op(ps, top_op);
					continue;
				}
				break;
			}

			filter_opstack_push(ps, op);
			continue;
		}

		if (ch == '(') {
			filter_opstack_push(ps, OP_OPEN_PAREN);
			continue;
		}

		if (ch == ')') {
			if (strlen(curr_operand(ps))) {
				postfix_append_operand(ps, curr_operand(ps));
				clear_operand_string(ps);
			}

			/* Flush operators back to the matching '('. */
			top_op = filter_opstack_pop(ps);
			while (top_op != OP_NONE) {
				if (top_op == OP_OPEN_PAREN)
					break;
				postfix_append_op(ps, top_op);
				top_op = filter_opstack_pop(ps);
			}
			if (top_op == OP_NONE) {
				parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
				return -EINVAL;
			}
			continue;
		}
parse_operand:
		if (append_operand_char(ps, ch)) {
			parse_error(ps, FILT_ERR_OPERAND_TOO_LONG, 0);
			return -EINVAL;
		}
	}

	/* Flush the final operand and any remaining operators. */
	if (strlen(curr_operand(ps)))
		postfix_append_operand(ps, curr_operand(ps));

	while (!filter_opstack_empty(ps)) {
		top_op = filter_opstack_pop(ps);
		if (top_op == OP_NONE)
			break;
		if (top_op == OP_OPEN_PAREN) {
			/* A leftover '(' means a ')' was missing. */
			parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
			return -EINVAL;
		}
		postfix_append_op(ps, top_op);
	}

	return 0;
}
1382
/*
 * Build a filter_pred for one postfix element.
 *
 * OP_AND/OP_OR need no operands: a bare predicate carrying only the op
 * is returned. For comparison ops, @operand1 names the event field and
 * @operand2 is the value/pattern to compare against.
 *
 * Returns a pointer to a single static predicate — the caller must copy
 * it before the next call. NOTE(review): relies on external
 * serialization (presumably event_mutex) since the static is shared;
 * confirm all callers hold it.
 *
 * Returns NULL with a parse error recorded in @ps on failure.
 */
static struct filter_pred *create_pred(struct filter_parse_state *ps,
				       struct trace_event_call *call,
				       int op, char *operand1, char *operand2)
{
	struct ftrace_event_field *field;
	static struct filter_pred pred;

	memset(&pred, 0, sizeof(pred));
	pred.op = op;

	/* logical ops carry no field/value of their own */
	if (op == OP_AND || op == OP_OR)
		return &pred;

	if (!operand1 || !operand2) {
		parse_error(ps, FILT_ERR_MISSING_FIELD, 0);
		return NULL;
	}

	field = trace_find_event_field(call, operand1);
	if (!field) {
		parse_error(ps, FILT_ERR_FIELD_NOT_FOUND, 0);
		return NULL;
	}

	/*
	 * NOTE(review): unbounded strcpy — assumes the operand length was
	 * already capped by the operand parser (append_operand_char()
	 * rejects over-long operands); verify the cap matches the
	 * regex.pattern buffer size.
	 */
	strcpy(pred.regex.pattern, operand2);
	pred.regex.len = strlen(pred.regex.pattern);
	pred.field = field;
	return init_pred(ps, field, &pred) ? NULL : &pred;
}
1412
/*
 * Sanity-check the postfix expression: simulate the operand-stack depth
 * so every binary operator has its two operands and exactly one value
 * remains at the end; also require at least one comparison predicate
 * and fewer logical (&&/||) operators than comparisons.
 *
 * Returns 0 if well-formed, -EINVAL (with a parse error recorded)
 * otherwise.
 */
static int check_preds(struct filter_parse_state *ps)
{
	int n_normal_preds = 0, n_logical_preds = 0;
	struct postfix_elt *elt;
	int cnt = 0;	/* simulated operand-stack depth */

	list_for_each_entry(elt, &ps->postfix, list) {
		if (elt->op == OP_NONE) {
			/* operand: pushes one value */
			cnt++;
			continue;
		}

		if (elt->op == OP_AND || elt->op == OP_OR) {
			/* binary logical op: pops two, pushes one -> net -1 */
			n_logical_preds++;
			cnt--;
			continue;
		}
		/* OP_NOT is unary and leaves the depth unchanged */
		if (elt->op != OP_NOT)
			cnt--;
		n_normal_preds++;
		/* all ops should have operands */
		if (cnt < 0)
			break;
	}

	if (cnt != 1 || !n_normal_preds || n_logical_preds >= n_normal_preds) {
		parse_error(ps, FILT_ERR_INVALID_FILTER, 0);
		return -EINVAL;
	}

	return 0;
}
1445
1446 static int count_preds(struct filter_parse_state *ps)
1447 {
1448 struct postfix_elt *elt;
1449 int n_preds = 0;
1450
1451 list_for_each_entry(elt, &ps->postfix, list) {
1452 if (elt->op == OP_NONE)
1453 continue;
1454 n_preds++;
1455 }
1456
1457 return n_preds;
1458 }
1459
/* Walk-limit bookkeeping for check_pred_tree(). */
struct check_pred_data {
	int count;	/* nodes visited so far */
	int max;	/* visit budget; exceeding it indicates a broken tree */
};
1464
1465 static int check_pred_tree_cb(enum move_type move, struct filter_pred *pred,
1466 int *err, void *data)
1467 {
1468 struct check_pred_data *d = data;
1469
1470 if (WARN_ON(d->count++ > d->max)) {
1471 *err = -EINVAL;
1472 return WALK_PRED_ABORT;
1473 }
1474 return WALK_PRED_DEFAULT;
1475 }
1476
1477 /*
1478 * The tree is walked at filtering of an event. If the tree is not correctly
1479 * built, it may cause an infinite loop. Check here that the tree does
1480 * indeed terminate.
1481 */
1482 static int check_pred_tree(struct event_filter *filter,
1483 struct filter_pred *root)
1484 {
1485 struct check_pred_data data = {
1486 /*
1487 * The max that we can hit a node is three times.
1488 * Once going down, once coming up from left, and
1489 * once coming up from right. This is more than enough
1490 * since leafs are only hit a single time.
1491 */
1492 .max = 3 * filter->n_preds,
1493 .count = 0,
1494 };
1495
1496 return walk_pred_tree(filter->preds, root,
1497 check_pred_tree_cb, &data);
1498 }
1499
1500 static int count_leafs_cb(enum move_type move, struct filter_pred *pred,
1501 int *err, void *data)
1502 {
1503 int *count = data;
1504
1505 if ((move == MOVE_DOWN) &&
1506 (pred->left == FILTER_PRED_INVALID))
1507 (*count)++;
1508
1509 return WALK_PRED_DEFAULT;
1510 }
1511
1512 static int count_leafs(struct filter_pred *preds, struct filter_pred *root)
1513 {
1514 int count = 0, ret;
1515
1516 ret = walk_pred_tree(preds, root, count_leafs_cb, &count);
1517 WARN_ON(ret);
1518 return count;
1519 }
1520
/* State passed to fold_pred_cb() while flattening a subtree. */
struct fold_pred_data {
	struct filter_pred *root;	/* subtree root receiving the ops[] array */
	int count;			/* leaves collected so far */
	int children;			/* expected number of leaves */
};
1526
1527 static int fold_pred_cb(enum move_type move, struct filter_pred *pred,
1528 int *err, void *data)
1529 {
1530 struct fold_pred_data *d = data;
1531 struct filter_pred *root = d->root;
1532
1533 if (move != MOVE_DOWN)
1534 return WALK_PRED_DEFAULT;
1535 if (pred->left != FILTER_PRED_INVALID)
1536 return WALK_PRED_DEFAULT;
1537
1538 if (WARN_ON(d->count == d->children)) {
1539 *err = -EINVAL;
1540 return WALK_PRED_ABORT;
1541 }
1542
1543 pred->index &= ~FILTER_PRED_FOLD;
1544 root->ops[d->count++] = pred->index;
1545 return WALK_PRED_DEFAULT;
1546 }
1547
/*
 * Flatten the subtree below @root: collect the index of every leaf
 * into root->ops[] so the whole group of same-op children can be
 * evaluated as an array rather than by walking the tree.
 *
 * Returns 0 on success, -ENOMEM if the ops array cannot be allocated,
 * or an error from the tree walk.
 */
static int fold_pred(struct filter_pred *preds, struct filter_pred *root)
{
	struct fold_pred_data data = {
		.root = root,
		.count = 0,
	};
	int children;

	/* No need to keep the fold flag */
	root->index &= ~FILTER_PRED_FOLD;

	/* If the root is a leaf then do nothing */
	if (root->left == FILTER_PRED_INVALID)
		return 0;

	/* count the children */
	children = count_leafs(preds, &preds[root->left]);
	children += count_leafs(preds, &preds[root->right]);

	root->ops = kcalloc(children, sizeof(*root->ops), GFP_KERNEL);
	if (!root->ops)
		return -ENOMEM;

	/* val holds the number of collected leaf indices */
	root->val = children;
	data.children = children;
	return walk_pred_tree(preds, root, fold_pred_cb, &data);
}
1575
/*
 * Walk callback for fold_pred_tree(): fold each subtree whose root is
 * marked FILTER_PRED_FOLD, then skip over the folded subtree.
 */
static int fold_pred_tree_cb(enum move_type move, struct filter_pred *pred,
			     int *err, void *data)
{
	struct filter_pred *preds = data;

	if (move != MOVE_DOWN)
		return WALK_PRED_DEFAULT;
	/* only nodes marked foldable by the tree builder are folded */
	if (!(pred->index & FILTER_PRED_FOLD))
		return WALK_PRED_DEFAULT;

	*err = fold_pred(preds, pred);
	if (*err)
		return WALK_PRED_ABORT;

	/* everything below is folded, continue with parent */
	return WALK_PRED_PARENT;
}
1593
1594 /*
1595 * To optimize the processing of the ops, if we have several "ors" or
1596 * "ands" together, we can put them in an array and process them all
1597 * together speeding up the filter logic.
1598 */
1599 static int fold_pred_tree(struct event_filter *filter,
1600 struct filter_pred *root)
1601 {
1602 return walk_pred_tree(filter->preds, root, fold_pred_tree_cb,
1603 filter->preds);
1604 }
1605
1606 static int replace_preds(struct trace_event_call *call,
1607 struct event_filter *filter,
1608 struct filter_parse_state *ps,
1609 bool dry_run)
1610 {
1611 char *operand1 = NULL, *operand2 = NULL;
1612 struct filter_pred *pred;
1613 struct filter_pred *root;
1614 struct postfix_elt *elt;
1615 struct pred_stack stack = { }; /* init to NULL */
1616 int err;
1617 int n_preds = 0;
1618
1619 n_preds = count_preds(ps);
1620 if (n_preds >= MAX_FILTER_PRED) {
1621 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1622 return -ENOSPC;
1623 }
1624
1625 err = check_preds(ps);
1626 if (err)
1627 return err;
1628
1629 if (!dry_run) {
1630 err = __alloc_pred_stack(&stack, n_preds);
1631 if (err)
1632 return err;
1633 err = __alloc_preds(filter, n_preds);
1634 if (err)
1635 goto fail;
1636 }
1637
1638 n_preds = 0;
1639 list_for_each_entry(elt, &ps->postfix, list) {
1640 if (elt->op == OP_NONE) {
1641 if (!operand1)
1642 operand1 = elt->operand;
1643 else if (!operand2)
1644 operand2 = elt->operand;
1645 else {
1646 parse_error(ps, FILT_ERR_TOO_MANY_OPERANDS, 0);
1647 err = -EINVAL;
1648 goto fail;
1649 }
1650 continue;
1651 }
1652
1653 if (elt->op == OP_NOT) {
1654 if (!n_preds || operand1 || operand2) {
1655 parse_error(ps, FILT_ERR_ILLEGAL_NOT_OP, 0);
1656 err = -EINVAL;
1657 goto fail;
1658 }
1659 if (!dry_run)
1660 filter->preds[n_preds - 1].not ^= 1;
1661 continue;
1662 }
1663
1664 if (WARN_ON(n_preds++ == MAX_FILTER_PRED)) {
1665 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1666 err = -ENOSPC;
1667 goto fail;
1668 }
1669
1670 pred = create_pred(ps, call, elt->op, operand1, operand2);
1671 if (!pred) {
1672 err = -EINVAL;
1673 goto fail;
1674 }
1675
1676 if (!dry_run) {
1677 err = filter_add_pred(ps, filter, pred, &stack);
1678 if (err)
1679 goto fail;
1680 }
1681
1682 operand1 = operand2 = NULL;
1683 }
1684
1685 if (!dry_run) {
1686 /* We should have one item left on the stack */
1687 pred = __pop_pred_stack(&stack);
1688 if (!pred)
1689 return -EINVAL;
1690 /* This item is where we start from in matching */
1691 root = pred;
1692 /* Make sure the stack is empty */
1693 pred = __pop_pred_stack(&stack);
1694 if (WARN_ON(pred)) {
1695 err = -EINVAL;
1696 filter->root = NULL;
1697 goto fail;
1698 }
1699 err = check_pred_tree(filter, root);
1700 if (err)
1701 goto fail;
1702
1703 /* Optimize the tree */
1704 err = fold_pred_tree(filter, root);
1705 if (err)
1706 goto fail;
1707
1708 /* We don't set root until we know it works */
1709 barrier();
1710 filter->root = root;
1711 }
1712
1713 err = 0;
1714 fail:
1715 __free_pred_stack(&stack);
1716 return err;
1717 }
1718
1719 static inline void event_set_filtered_flag(struct trace_event_file *file)
1720 {
1721 struct trace_event_call *call = file->event_call;
1722
1723 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
1724 call->flags |= TRACE_EVENT_FL_FILTERED;
1725 else
1726 file->flags |= EVENT_FILE_FL_FILTERED;
1727 }
1728
1729 static inline void event_set_filter(struct trace_event_file *file,
1730 struct event_filter *filter)
1731 {
1732 struct trace_event_call *call = file->event_call;
1733
1734 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
1735 rcu_assign_pointer(call->filter, filter);
1736 else
1737 rcu_assign_pointer(file->filter, filter);
1738 }
1739
1740 static inline void event_clear_filter(struct trace_event_file *file)
1741 {
1742 struct trace_event_call *call = file->event_call;
1743
1744 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
1745 RCU_INIT_POINTER(call->filter, NULL);
1746 else
1747 RCU_INIT_POINTER(file->filter, NULL);
1748 }
1749
1750 static inline void
1751 event_set_no_set_filter_flag(struct trace_event_file *file)
1752 {
1753 struct trace_event_call *call = file->event_call;
1754
1755 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
1756 call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
1757 else
1758 file->flags |= EVENT_FILE_FL_NO_SET_FILTER;
1759 }
1760
1761 static inline void
1762 event_clear_no_set_filter_flag(struct trace_event_file *file)
1763 {
1764 struct trace_event_call *call = file->event_call;
1765
1766 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
1767 call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
1768 else
1769 file->flags &= ~EVENT_FILE_FL_NO_SET_FILTER;
1770 }
1771
1772 static inline bool
1773 event_no_set_filter_flag(struct trace_event_file *file)
1774 {
1775 struct trace_event_call *call = file->event_call;
1776
1777 if (file->flags & EVENT_FILE_FL_NO_SET_FILTER)
1778 return true;
1779
1780 if ((call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) &&
1781 (call->flags & TRACE_EVENT_FL_NO_SET_FILTER))
1782 return true;
1783
1784 return false;
1785 }
1786
/* Node used to park replaced filters until an RCU grace period passes. */
struct filter_list {
	struct list_head list;
	struct event_filter *filter;
};
1791
/*
 * Apply @filter_string to every event file of subsystem @dir in @tr.
 *
 * Pass 1 dry-runs the filter against each event and flags those that
 * cannot take it. Pass 2 builds a real filter for each applicable
 * event, swaps it in, and parks the displaced filter on a local list
 * so it is freed only after a synchronize_sched() grace period.
 *
 * Returns 0 if at least one event took the filter, -EINVAL if none
 * did, -ENOMEM on allocation failure.
 */
static int replace_system_preds(struct trace_subsystem_dir *dir,
				struct trace_array *tr,
				struct filter_parse_state *ps,
				char *filter_string)
{
	struct trace_event_file *file;
	struct filter_list *filter_item;
	struct filter_list *tmp;
	LIST_HEAD(filter_list);
	bool fail = true;
	int err;

	list_for_each_entry(file, &tr->events, list) {
		if (file->system != dir)
			continue;

		/*
		 * Try to see if the filter can be applied
		 * (filter arg is ignored on dry_run)
		 */
		err = replace_preds(file->event_call, NULL, ps, true);
		if (err)
			event_set_no_set_filter_flag(file);
		else
			event_clear_no_set_filter_flag(file);
	}

	list_for_each_entry(file, &tr->events, list) {
		struct event_filter *filter;

		if (file->system != dir)
			continue;

		/* skip events that failed the dry run above */
		if (event_no_set_filter_flag(file))
			continue;

		filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
		if (!filter_item)
			goto fail_mem;

		list_add_tail(&filter_item->list, &filter_list);

		filter_item->filter = __alloc_filter();
		if (!filter_item->filter)
			goto fail_mem;
		filter = filter_item->filter;

		/* Can only fail on no memory */
		err = replace_filter_string(filter, filter_string);
		if (err)
			goto fail_mem;

		err = replace_preds(file->event_call, filter, ps, false);
		if (err) {
			filter_disable(file);
			parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
			append_filter_err(ps, filter);
		} else
			event_set_filtered_flag(file);
		/*
		 * Regardless of if this returned an error, we still
		 * replace the filter for the call.
		 */
		filter = event_filter(file);
		event_set_filter(file, filter_item->filter);
		/* the displaced filter is what gets freed below */
		filter_item->filter = filter;

		fail = false;
	}

	if (fail)
		goto fail;

	/*
	 * The calls can still be using the old filters.
	 * Do a synchronize_sched() to ensure all calls are
	 * done with them before we free them.
	 */
	synchronize_sched();
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		__free_filter(filter_item->filter);
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	return 0;
fail:
	/* No call succeeded */
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
	return -EINVAL;
fail_mem:
	/* If any call succeeded, we still need to sync */
	if (!fail)
		synchronize_sched();
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		__free_filter(filter_item->filter);
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	return -ENOMEM;
}
1896
/*
 * Allocate an event_filter and a parse state, optionally record
 * @filter_str in the filter (for later error display), and run the
 * infix->postfix parse.
 *
 * On success — and also on a parse failure — *@psp and *@filterp are
 * set and owned by the caller. Only on allocation failure are both
 * freed and left untouched (-ENOMEM). Otherwise returns the parse
 * result.
 */
static int create_filter_start(char *filter_str, bool set_str,
			       struct filter_parse_state **psp,
			       struct event_filter **filterp)
{
	struct event_filter *filter;
	struct filter_parse_state *ps = NULL;
	int err = 0;

	WARN_ON_ONCE(*psp || *filterp);

	/* allocate everything, and if any fails, free all and fail */
	filter = __alloc_filter();
	if (filter && set_str)
		err = replace_filter_string(filter, filter_str);

	ps = kzalloc(sizeof(*ps), GFP_KERNEL);

	if (!filter || !ps || err) {
		kfree(ps);
		/* filter may be NULL here: __free_filter() must tolerate it */
		__free_filter(filter);
		return -ENOMEM;
	}

	/* we're committed to creating a new filter */
	*filterp = filter;
	*psp = ps;

	parse_init(ps, filter_ops, filter_str);
	err = filter_parse(ps);
	if (err && set_str)
		append_filter_err(ps, filter);
	return err;
}
1930
/* Tear down the parse state created by create_filter_start() (NULL ok). */
static void create_filter_finish(struct filter_parse_state *ps)
{
	if (!ps)
		return;

	filter_opstack_clear(ps);
	postfix_clear(ps);
	kfree(ps);
}
1939
1940 /**
1941 * create_filter - create a filter for a trace_event_call
1942 * @call: trace_event_call to create a filter for
1943 * @filter_str: filter string
1944 * @set_str: remember @filter_str and enable detailed error in filter
1945 * @filterp: out param for created filter (always updated on return)
1946 *
1947 * Creates a filter for @call with @filter_str. If @set_str is %true,
1948 * @filter_str is copied and recorded in the new filter.
1949 *
1950 * On success, returns 0 and *@filterp points to the new filter. On
1951 * failure, returns -errno and *@filterp may point to %NULL or to a new
1952 * filter. In the latter case, the returned filter contains error
1953 * information if @set_str is %true and the caller is responsible for
1954 * freeing it.
1955 */
1956 static int create_filter(struct trace_event_call *call,
1957 char *filter_str, bool set_str,
1958 struct event_filter **filterp)
1959 {
1960 struct event_filter *filter = NULL;
1961 struct filter_parse_state *ps = NULL;
1962 int err;
1963
1964 err = create_filter_start(filter_str, set_str, &ps, &filter);
1965 if (!err) {
1966 err = replace_preds(call, filter, ps, false);
1967 if (err && set_str)
1968 append_filter_err(ps, filter);
1969 }
1970 create_filter_finish(ps);
1971
1972 *filterp = filter;
1973 return err;
1974 }
1975
/* Exported wrapper around create_filter() for other tracing code. */
int create_event_filter(struct trace_event_call *call,
			char *filter_str, bool set_str,
			struct event_filter **filterp)
{
	return create_filter(call, filter_str, set_str, filterp);
}
1982
/**
 * create_system_filter - create a filter for an event subsystem
 * @dir: the subsystem directory to create the filter for
 * @tr: the trace array the subsystem belongs to
 * @filter_str: filter string
 * @filterp: out param for created filter (always updated on return)
 *
 * Identical to create_filter() except that it applies the filter to
 * every event in the subsystem and always remembers @filter_str.
 */
static int create_system_filter(struct trace_subsystem_dir *dir,
				struct trace_array *tr,
				char *filter_str, struct event_filter **filterp)
{
	struct event_filter *filter = NULL;
	struct filter_parse_state *ps = NULL;
	int err;

	err = create_filter_start(filter_str, true, &ps, &filter);
	if (!err) {
		err = replace_system_preds(dir, tr, ps, filter_str);
		if (!err) {
			/* System filters just show a default message */
			kfree(filter->filter_string);
			filter->filter_string = NULL;
		} else {
			append_filter_err(ps, filter);
		}
	}
	create_filter_finish(ps);

	*filterp = filter;
	return err;
}
2016
/*
 * Set or clear the filter on a single event file from @filter_string.
 * The literal string "0" removes the current filter. The displaced
 * filter is freed only after synchronize_sched(), since readers access
 * it under preemption-disabled (RCU-sched) context.
 *
 * caller must hold event_mutex
 */
int apply_event_filter(struct trace_event_file *file, char *filter_string)
{
	struct trace_event_call *call = file->event_call;
	struct event_filter *filter;
	int err;

	/* "0" means: drop the existing filter */
	if (!strcmp(strstrip(filter_string), "0")) {
		filter_disable(file);
		filter = event_filter(file);

		if (!filter)
			return 0;

		event_clear_filter(file);

		/* Make sure the filter is not being used */
		synchronize_sched();
		__free_filter(filter);

		return 0;
	}

	err = create_filter(call, filter_string, true, &filter);

	/*
	 * Always swap the call filter with the new filter
	 * even if there was an error. If there was an error
	 * in the filter, we disable the filter and show the error
	 * string
	 */
	if (filter) {
		struct event_filter *tmp;

		tmp = event_filter(file);
		if (!err)
			event_set_filtered_flag(file);
		else
			filter_disable(file);

		event_set_filter(file, filter);

		if (tmp) {
			/* Make sure the call is done with the filter */
			synchronize_sched();
			__free_filter(tmp);
		}
	}

	return err;
}
2068
/*
 * Set or clear ("0") the filter for an entire event subsystem.
 * Takes event_mutex. Returns 0 or a negative errno.
 */
int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
				 char *filter_string)
{
	struct event_subsystem *system = dir->subsystem;
	struct trace_array *tr = dir->tr;
	struct event_filter *filter;
	int err = 0;

	mutex_lock(&event_mutex);

	/* Make sure the system still has events */
	if (!dir->nr_events) {
		err = -ENODEV;
		goto out_unlock;
	}

	/* "0" clears the subsystem filter and every per-event filter */
	if (!strcmp(strstrip(filter_string), "0")) {
		filter_free_subsystem_preds(dir, tr);
		remove_filter_string(system->filter);
		filter = system->filter;
		system->filter = NULL;
		/* Ensure all filters are no longer used */
		synchronize_sched();
		filter_free_subsystem_filters(dir, tr);
		__free_filter(filter);
		goto out_unlock;
	}

	err = create_system_filter(dir, tr, filter_string, &filter);
	if (filter) {
		/*
		 * No event actually uses the system filter
		 * we can free it without synchronize_sched().
		 */
		__free_filter(system->filter);
		system->filter = filter;
	}
out_unlock:
	mutex_unlock(&event_mutex);

	return err;
}
2111
2112 #ifdef CONFIG_PERF_EVENTS
2113
2114 void ftrace_profile_free_filter(struct perf_event *event)
2115 {
2116 struct event_filter *filter = event->filter;
2117
2118 event->filter = NULL;
2119 __free_filter(filter);
2120 }
2121
/* Per-walk state for translating a perf function filter into ftrace_ops. */
struct function_filter_data {
	struct ftrace_ops *ops;	/* ops whose filter/notrace lists are populated */
	int first_filter;	/* 1 until the first filter pattern resets the list */
	int first_notrace;	/* 1 until the first notrace pattern resets the list */
};
2127
2128 #ifdef CONFIG_FUNCTION_TRACER
2129 static char **
2130 ftrace_function_filter_re(char *buf, int len, int *count)
2131 {
2132 char *str, **re;
2133
2134 str = kstrndup(buf, len, GFP_KERNEL);
2135 if (!str)
2136 return NULL;
2137
2138 /*
2139 * The argv_split function takes white space
2140 * as a separator, so convert ',' into spaces.
2141 */
2142 strreplace(str, ',', ' ');
2143
2144 re = argv_split(GFP_KERNEL, str, count);
2145 kfree(str);
2146 return re;
2147 }
2148
/* Route one pattern to either the ftrace filter or the notrace list. */
static int ftrace_function_set_regexp(struct ftrace_ops *ops, int filter,
				      int reset, char *re, int len)
{
	return filter ? ftrace_set_filter(ops, re, len, reset)
		      : ftrace_set_notrace(ops, re, len, reset);
}
2161
2162 static int __ftrace_function_set_filter(int filter, char *buf, int len,
2163 struct function_filter_data *data)
2164 {
2165 int i, re_cnt, ret = -EINVAL;
2166 int *reset;
2167 char **re;
2168
2169 reset = filter ? &data->first_filter : &data->first_notrace;
2170
2171 /*
2172 * The 'ip' field could have multiple filters set, separated
2173 * either by space or comma. We first cut the filter and apply
2174 * all pieces separatelly.
2175 */
2176 re = ftrace_function_filter_re(buf, len, &re_cnt);
2177 if (!re)
2178 return -EINVAL;
2179
2180 for (i = 0; i < re_cnt; i++) {
2181 ret = ftrace_function_set_regexp(data->ops, filter, *reset,
2182 re[i], strlen(re[i]));
2183 if (ret)
2184 break;
2185
2186 if (*reset)
2187 *reset = 0;
2188 }
2189
2190 argv_free(re);
2191 return ret;
2192 }
2193
2194 static int ftrace_function_check_pred(struct filter_pred *pred, int leaf)
2195 {
2196 struct ftrace_event_field *field = pred->field;
2197
2198 if (leaf) {
2199 /*
2200 * Check the leaf predicate for function trace, verify:
2201 * - only '==' and '!=' is used
2202 * - the 'ip' field is used
2203 */
2204 if ((pred->op != OP_EQ) && (pred->op != OP_NE))
2205 return -EINVAL;
2206
2207 if (strcmp(field->name, "ip"))
2208 return -EINVAL;
2209 } else {
2210 /*
2211 * Check the non leaf predicate for function trace, verify:
2212 * - only '||' is used
2213 */
2214 if (pred->op != OP_OR)
2215 return -EINVAL;
2216 }
2217
2218 return 0;
2219 }
2220
/*
 * Walk callback: validate each predicate for function tracing and, for
 * every leaf, push its pattern into the ftrace filter (OP_EQ) or
 * notrace (OP_NE) list.
 */
static int ftrace_function_set_filter_cb(enum move_type move,
					 struct filter_pred *pred,
					 int *err, void *data)
{
	/* Checking the node is valid for function trace. */
	if ((move != MOVE_DOWN) ||
	    (pred->left != FILTER_PRED_INVALID)) {
		/* interior node, or a revisit on the way back up */
		*err = ftrace_function_check_pred(pred, 0);
	} else {
		/* leaf reached on the way down: must be ip ==/!= pattern */
		*err = ftrace_function_check_pred(pred, 1);
		if (*err)
			return WALK_PRED_ABORT;

		/* OP_EQ feeds the filter list, OP_NE the notrace list */
		*err = __ftrace_function_set_filter(pred->op == OP_EQ,
						    pred->regex.pattern,
						    pred->regex.len,
						    data);
	}

	return (*err) ? WALK_PRED_ABORT : WALK_PRED_DEFAULT;
}
2242
2243 static int ftrace_function_set_filter(struct perf_event *event,
2244 struct event_filter *filter)
2245 {
2246 struct function_filter_data data = {
2247 .first_filter = 1,
2248 .first_notrace = 1,
2249 .ops = &event->ftrace_ops,
2250 };
2251
2252 return walk_pred_tree(filter->preds, filter->root,
2253 ftrace_function_set_filter_cb, &data);
2254 }
2255 #else
/* CONFIG_FUNCTION_TRACER is off: function filters cannot be honored. */
static int ftrace_function_set_filter(struct perf_event *event,
				      struct event_filter *filter)
{
	return -ENODEV;
}
2261 #endif /* CONFIG_FUNCTION_TRACER */
2262
/*
 * Attach an event filter to a perf event (perf ioctl path).
 *
 * For function events the parsed filter is translated into the event's
 * ftrace_ops filter/notrace lists and then discarded; for all other
 * events the filter itself is stored on @event.
 */
int ftrace_profile_set_filter(struct perf_event *event, int event_id,
			      char *filter_str)
{
	int err;
	struct event_filter *filter;
	struct trace_event_call *call;

	mutex_lock(&event_mutex);

	call = event->tp_event;

	err = -EINVAL;
	if (!call)
		goto out_unlock;

	/* only one filter per perf event */
	err = -EEXIST;
	if (event->filter)
		goto out_unlock;

	err = create_filter(call, filter_str, false, &filter);
	if (err)
		goto free_filter;

	if (ftrace_event_is_function(call))
		err = ftrace_function_set_filter(event, filter);
	else
		event->filter = filter;

free_filter:
	/* function events consumed the filter; keep it otherwise unless err */
	if (err || ftrace_event_is_function(call))
		__free_filter(filter);

out_unlock:
	mutex_unlock(&event_mutex);

	return err;
}
2300
2301 #endif /* CONFIG_PERF_EVENTS */
2302
2303 #ifdef CONFIG_FTRACE_STARTUP_TEST
2304
2305 #include <linux/types.h>
2306 #include <linux/tracepoint.h>
2307
2308 #define CREATE_TRACE_POINTS
2309 #include "trace_events_filter_test.h"
2310
/*
 * Build one self-test case: the current FILTER string, a synthetic
 * record with fields a..h, the expected match result @m, and @nvisit -
 * the field names whose predicates must NOT be evaluated (proves
 * short-circuiting works).
 */
#define DATA_REC(m, va, vb, vc, vd, ve, vf, vg, vh, nvisit) \
{ \
	.filter = FILTER, \
	.rec = { .a = va, .b = vb, .c = vc, .d = vd, \
		 .e = ve, .f = vf, .g = vg, .h = vh }, \
	.match = m, \
	.not_visited = nvisit, \
}
/* expected filter_match_preds() results */
#define YES 1
#define NO 0
2321
/*
 * Table of startup self-test cases. Each entry pairs a filter
 * expression with a synthetic record, the expected match result, and
 * the list of fields whose predicates must not be visited.
 */
static struct test_filter_data_t {
	char *filter;		/* filter expression under test */
	struct trace_event_raw_ftrace_test_filter rec;	/* synthetic event */
	int match;		/* expected filter_match_preds() result */
	char *not_visited;	/* field names that must not be evaluated */
} test_filter_data[] = {
#define FILTER "a == 1 && b == 1 && c == 1 && d == 1 && " \
	       "e == 1 && f == 1 && g == 1 && h == 1"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, ""),
	DATA_REC(NO, 0, 1, 1, 1, 1, 1, 1, 1, "bcdefgh"),
	DATA_REC(NO, 1, 1, 1, 1, 1, 1, 1, 0, ""),
#undef FILTER
#define FILTER "a == 1 || b == 1 || c == 1 || d == 1 || " \
	       "e == 1 || f == 1 || g == 1 || h == 1"
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""),
	DATA_REC(YES, 0, 0, 0, 0, 0, 0, 0, 1, ""),
	DATA_REC(YES, 1, 0, 0, 0, 0, 0, 0, 0, "bcdefgh"),
#undef FILTER
#define FILTER "(a == 1 || b == 1) && (c == 1 || d == 1) && " \
	       "(e == 1 || f == 1) && (g == 1 || h == 1)"
	DATA_REC(NO, 0, 0, 1, 1, 1, 1, 1, 1, "dfh"),
	DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(YES, 1, 0, 1, 0, 0, 1, 0, 1, "bd"),
	DATA_REC(NO, 1, 0, 1, 0, 0, 1, 0, 0, "bd"),
#undef FILTER
#define FILTER "(a == 1 && b == 1) || (c == 1 && d == 1) || " \
	       "(e == 1 && f == 1) || (g == 1 && h == 1)"
	DATA_REC(YES, 1, 0, 1, 1, 1, 1, 1, 1, "efgh"),
	DATA_REC(YES, 0, 0, 0, 0, 0, 0, 1, 1, ""),
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""),
#undef FILTER
#define FILTER "(a == 1 && b == 1) && (c == 1 && d == 1) && " \
	       "(e == 1 && f == 1) || (g == 1 && h == 1)"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 0, "gh"),
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""),
	DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, ""),
#undef FILTER
#define FILTER "((a == 1 || b == 1) || (c == 1 || d == 1) || " \
	       "(e == 1 || f == 1)) && (g == 1 || h == 1)"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 1, "bcdef"),
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""),
	DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, "h"),
#undef FILTER
#define FILTER "((((((((a == 1) && (b == 1)) || (c == 1)) && (d == 1)) || " \
	       "(e == 1)) && (f == 1)) || (g == 1)) && (h == 1))"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "ceg"),
	DATA_REC(NO, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(NO, 1, 0, 1, 0, 1, 0, 1, 0, ""),
#undef FILTER
#define FILTER "((((((((a == 1) || (b == 1)) && (c == 1)) || (d == 1)) && " \
	       "(e == 1)) || (f == 1)) && (g == 1)) || (h == 1))"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "bdfh"),
	DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(YES, 1, 0, 1, 0, 1, 0, 1, 0, "bdfh"),
};
2377
2378 #undef DATA_REC
2379 #undef FILTER
2380 #undef YES
2381 #undef NO
2382
2383 #define DATA_CNT (sizeof(test_filter_data)/sizeof(struct test_filter_data_t))
2384
2385 static int test_pred_visited;
2386
2387 static int test_pred_visited_fn(struct filter_pred *pred, void *event)
2388 {
2389 struct ftrace_event_field *field = pred->field;
2390
2391 test_pred_visited = 1;
2392 printk(KERN_INFO "\npred visited %s\n", field->name);
2393 return 1;
2394 }
2395
2396 static int test_walk_pred_cb(enum move_type move, struct filter_pred *pred,
2397 int *err, void *data)
2398 {
2399 char *fields = data;
2400
2401 if ((move == MOVE_DOWN) &&
2402 (pred->left == FILTER_PRED_INVALID)) {
2403 struct ftrace_event_field *field = pred->field;
2404
2405 if (!field) {
2406 WARN(1, "all leafs should have field defined");
2407 return WALK_PRED_DEFAULT;
2408 }
2409 if (!strchr(fields, *field->name))
2410 return WALK_PRED_DEFAULT;
2411
2412 WARN_ON(!pred->fn);
2413 pred->fn = test_pred_visited_fn;
2414 }
2415 return WALK_PRED_DEFAULT;
2416 }
2417
/*
 * Startup self-test: build each filter in test_filter_data[], run it
 * against its synthetic record, and verify both the match result and
 * that short-circuited predicates were never evaluated.
 */
static __init int ftrace_test_event_filter(void)
{
	int i;

	printk(KERN_INFO "Testing ftrace filter: ");

	for (i = 0; i < DATA_CNT; i++) {
		struct event_filter *filter = NULL;
		struct test_filter_data_t *d = &test_filter_data[i];
		int err;

		err = create_filter(&event_ftrace_test_filter, d->filter,
				    false, &filter);
		if (err) {
			printk(KERN_INFO
			       "Failed to get filter for '%s', err %d\n",
			       d->filter, err);
			__free_filter(filter);
			break;
		}

		/*
		 * The preemption disabling is not really needed for self
		 * tests, but the rcu dereference will complain without it.
		 */
		preempt_disable();
		/* plant tattletale fns on predicates that must be skipped */
		if (*d->not_visited)
			walk_pred_tree(filter->preds, filter->root,
				       test_walk_pred_cb,
				       d->not_visited);

		test_pred_visited = 0;
		err = filter_match_preds(filter, &d->rec);
		preempt_enable();

		__free_filter(filter);

		/* a "not visited" predicate fired: short-circuiting is broken */
		if (test_pred_visited) {
			printk(KERN_INFO
			       "Failed, unwanted pred visited for filter %s\n",
			       d->filter);
			break;
		}

		if (err != d->match) {
			printk(KERN_INFO
			       "Failed to match filter '%s', expected %d\n",
			       d->filter, d->match);
			break;
		}
	}

	/* loop ran to completion without break -> all cases passed */
	if (i == DATA_CNT)
		printk(KERN_CONT "OK\n");

	return 0;
}
2475
2476 late_initcall(ftrace_test_event_filter);
2477
2478 #endif /* CONFIG_FTRACE_STARTUP_TEST */
This page took 0.092264 seconds and 5 git commands to generate.