]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - kernel/trace/trace_events_filter.c
x86/cpu/amd: Give access to the number of nodes in a physical package
[mirror_ubuntu-artful-kernel.git] / kernel / trace / trace_events_filter.c
1 /*
2 * trace_events_filter - generic event filtering
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
19 */
20
21 #include <linux/module.h>
22 #include <linux/ctype.h>
23 #include <linux/mutex.h>
24 #include <linux/perf_event.h>
25 #include <linux/slab.h>
26
27 #include "trace.h"
28 #include "trace_output.h"
29
/*
 * Text shown when reading a subsystem filter file that has no filter set;
 * it also documents the semantics of writing to that file.
 */
#define DEFAULT_SYS_FILTER_MESSAGE					\
	"### global filter ###\n"					\
	"# Use this to set filters for multiple events.\n"		\
	"# Only events with the given fields will be affected.\n"	\
	"# If no events are modified, an error message will be displayed here"
35
/*
 * Operator ids used throughout the parser and predicate code.
 * NOTE: the order MUST match the filter_ops[] table below, which is
 * indexed by these values.
 */
enum filter_op_ids
{
	OP_OR,
	OP_AND,
	OP_GLOB,	/* '~' glob match, strings only */
	OP_NE,
	OP_EQ,
	OP_LT,
	OP_LE,
	OP_GT,
	OP_GE,
	OP_BAND,	/* bitwise AND test */
	OP_NOT,
	OP_NONE,	/* sentinel: "no operator" / table terminator */
	OP_OPEN_PAREN,
};
52
/* One entry of the operator table: id, its textual form, and its
 * precedence (higher binds tighter; 0 means "never popped by precedence").
 */
struct filter_op {
	int id;			/* one of enum filter_op_ids */
	char *string;		/* operator spelling in the filter string */
	int precedence;
};
58
/*
 * Order must be the same as enum filter_op_ids above.
 * The OP_NONE entry doubles as the loop terminator for the table scans
 * in is_op_char() and infix_get_op() (they stop on the "OP_NONE" string).
 */
static struct filter_op filter_ops[] = {
	{ OP_OR,	"||",		1 },
	{ OP_AND,	"&&",		2 },
	{ OP_GLOB,	"~",		4 },
	{ OP_NE,	"!=",		4 },
	{ OP_EQ,	"==",		4 },
	{ OP_LT,	"<",		5 },
	{ OP_LE,	"<=",		5 },
	{ OP_GT,	">",		5 },
	{ OP_GE,	">=",		5 },
	{ OP_BAND,	"&",		6 },
	{ OP_NOT,	"!",		6 },
	{ OP_NONE,	"OP_NONE",	0 },
	{ OP_OPEN_PAREN, "(",		0 },
};
75
/*
 * Parse/setup error codes.  Each value indexes the human-readable
 * message of the same position in err_text[] below — keep in sync.
 */
enum {
	FILT_ERR_NONE,
	FILT_ERR_INVALID_OP,
	FILT_ERR_UNBALANCED_PAREN,
	FILT_ERR_TOO_MANY_OPERANDS,
	FILT_ERR_OPERAND_TOO_LONG,
	FILT_ERR_FIELD_NOT_FOUND,
	FILT_ERR_ILLEGAL_FIELD_OP,
	FILT_ERR_ILLEGAL_INTVAL,
	FILT_ERR_BAD_SUBSYS_FILTER,
	FILT_ERR_TOO_MANY_PREDS,
	FILT_ERR_MISSING_FIELD,
	FILT_ERR_INVALID_FILTER,
	FILT_ERR_IP_FIELD_ONLY,
	FILT_ERR_ILLEGAL_NOT_OP,
};
92
/* Messages for the FILT_ERR_* codes above, in the same order. */
static char *err_text[] = {
	"No error",
	"Invalid operator",
	"Unbalanced parens",
	"Too many operands",
	"Operand too long",
	"Field not found",
	"Illegal operation for field type",
	"Illegal integer value",
	"Couldn't find or set field in one of a subsystem's events",
	"Too many terms in predicate expression",
	"Missing field name and/or value",
	"Meaningless filter expression",
	"Only 'ip' field is supported for function trace",
	"Illegal use of '!'",
};
109
/* Operator stack node used during infix->postfix conversion. */
struct opstack_op {
	int op;				/* enum filter_op_ids value */
	struct list_head list;		/* linked on filter_parse_state.opstack */
};

/* One element of the postfix (RPN) output: either an operator
 * (op != OP_NONE, operand == NULL) or an operand string.
 */
struct postfix_elt {
	int op;
	char *operand;			/* kstrdup'd; freed in postfix_clear() */
	struct list_head list;
};

/* Full state of one filter-string parse (shunting-yard conversion). */
struct filter_parse_state {
	struct filter_op *ops;		/* operator table (filter_ops) */
	struct list_head opstack;	/* pending operators, head == top */
	struct list_head postfix;	/* finished postfix expression */
	int lasterr;			/* FILT_ERR_* of last failure */
	int lasterr_pos;		/* position in input for the '^' marker */

	struct {
		char *string;		/* input filter string (not owned) */
		unsigned int cnt;	/* chars remaining to consume */
		unsigned int tail;	/* next char index to read */
	} infix;

	struct {
		char string[MAX_FILTER_STR_VAL]; /* operand being accumulated */
		int pos;
		unsigned int tail;	/* next free slot in string[] */
	} operand;
};

/* Stack of preds used to wire leaves to ops while building the tree. */
struct pred_stack {
	struct filter_pred **preds;	/* slot [0] stays NULL as sentinel */
	int index;			/* grows downward; == n_preds when empty */
};
145
/*
 * Generate a comparison predicate for a numeric type.  The final
 * "!!match == !pred->not" normalizes match to 0/1 and inverts it when
 * the NOT flag is set (so !x and x != val work).
 */
#define DEFINE_COMPARISON_PRED(type)					\
static int filter_pred_##type(struct filter_pred *pred, void *event)	\
{									\
	type *addr = (type *)(event + pred->offset);			\
	type val = (type)pred->val;					\
	int match = 0;							\
									\
	switch (pred->op) {						\
	case OP_LT:							\
		match = (*addr < val);					\
		break;							\
	case OP_LE:							\
		match = (*addr <= val);					\
		break;							\
	case OP_GT:							\
		match = (*addr > val);					\
		break;							\
	case OP_GE:							\
		match = (*addr >= val);					\
		break;							\
	case OP_BAND:							\
		match = (*addr & val);					\
		break;							\
	default:							\
		break;							\
	}								\
									\
	return !!match == !pred->not;					\
}
176
/*
 * Generate an equality predicate for a fixed-width unsigned type.
 * OP_NE is handled by the same function: init_pred() flips pred->not
 * for !=, and the XOR below inverts the result.
 */
#define DEFINE_EQUALITY_PRED(size)					\
static int filter_pred_##size(struct filter_pred *pred, void *event)	\
{									\
	u##size *addr = (u##size *)(event + pred->offset);		\
	u##size val = (u##size)pred->val;				\
	int match;							\
									\
	match = (val == *addr) ^ pred->not;				\
									\
	return match;							\
}
188
/* Instantiate the numeric predicates for every supported field width. */
DEFINE_COMPARISON_PRED(s64);
DEFINE_COMPARISON_PRED(u64);
DEFINE_COMPARISON_PRED(s32);
DEFINE_COMPARISON_PRED(u32);
DEFINE_COMPARISON_PRED(s16);
DEFINE_COMPARISON_PRED(u16);
DEFINE_COMPARISON_PRED(s8);
DEFINE_COMPARISON_PRED(u8);

DEFINE_EQUALITY_PRED(64);
DEFINE_EQUALITY_PRED(32);
DEFINE_EQUALITY_PRED(16);
DEFINE_EQUALITY_PRED(8);
202
203 /* Filter predicate for fixed sized arrays of characters */
204 static int filter_pred_string(struct filter_pred *pred, void *event)
205 {
206 char *addr = (char *)(event + pred->offset);
207 int cmp, match;
208
209 cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len);
210
211 match = cmp ^ pred->not;
212
213 return match;
214 }
215
216 /* Filter predicate for char * pointers */
217 static int filter_pred_pchar(struct filter_pred *pred, void *event)
218 {
219 char **addr = (char **)(event + pred->offset);
220 int cmp, match;
221 int len = strlen(*addr) + 1; /* including tailing '\0' */
222
223 cmp = pred->regex.match(*addr, &pred->regex, len);
224
225 match = cmp ^ pred->not;
226
227 return match;
228 }
229
230 /*
231 * Filter predicate for dynamic sized arrays of characters.
232 * These are implemented through a list of strings at the end
233 * of the entry.
234 * Also each of these strings have a field in the entry which
235 * contains its offset from the beginning of the entry.
236 * We have then first to get this field, dereference it
237 * and add it to the address of the entry, and at last we have
238 * the address of the string.
239 */
240 static int filter_pred_strloc(struct filter_pred *pred, void *event)
241 {
242 u32 str_item = *(u32 *)(event + pred->offset);
243 int str_loc = str_item & 0xffff;
244 int str_len = str_item >> 16;
245 char *addr = (char *)(event + str_loc);
246 int cmp, match;
247
248 cmp = pred->regex.match(addr, &pred->regex, str_len);
249
250 match = cmp ^ pred->not;
251
252 return match;
253 }
254
/* Default predicate: never matches.  Installed on freshly allocated preds. */
static int filter_pred_none(struct filter_pred *pred, void *event)
{
	return 0;
}
259
260 /*
261 * regex_match_foo - Basic regex callbacks
262 *
263 * @str: the string to be searched
264 * @r: the regex structure containing the pattern string
265 * @len: the length of the string to be searched (including '\0')
266 *
267 * Note:
268 * - @str might not be NULL-terminated if it's of type DYN_STRING
269 * or STATIC_STRING
270 */
271
272 static int regex_match_full(char *str, struct regex *r, int len)
273 {
274 if (strncmp(str, r->pattern, len) == 0)
275 return 1;
276 return 0;
277 }
278
279 static int regex_match_front(char *str, struct regex *r, int len)
280 {
281 if (strncmp(str, r->pattern, r->len) == 0)
282 return 1;
283 return 0;
284 }
285
286 static int regex_match_middle(char *str, struct regex *r, int len)
287 {
288 if (strnstr(str, r->pattern, len))
289 return 1;
290 return 0;
291 }
292
293 static int regex_match_end(char *str, struct regex *r, int len)
294 {
295 int strlen = len - 1;
296
297 if (strlen >= r->len &&
298 memcmp(str + strlen - r->len, r->pattern, r->len) == 0)
299 return 1;
300 return 0;
301 }
302
303 /**
304 * filter_parse_regex - parse a basic regex
305 * @buff: the raw regex
306 * @len: length of the regex
307 * @search: will point to the beginning of the string to compare
308 * @not: tell whether the match will have to be inverted
309 *
310 * This passes in a buffer containing a regex and this function will
311 * set search to point to the search part of the buffer and
312 * return the type of search it is (see enum above).
313 * This does modify buff.
314 *
315 * Returns enum type.
316 * search returns the pointer to use for comparison.
317 * not returns 1 if buff started with a '!'
318 * 0 otherwise.
319 */
enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
{
	int type = MATCH_FULL;
	int i;

	/* A leading '!' inverts the match and is skipped over. */
	if (buff[0] == '!') {
		*not = 1;
		buff++;
		len--;
	} else
		*not = 0;

	*search = buff;

	/*
	 * Classify by the position of the first '*':
	 *   "*foo"  -> MATCH_END_ONLY    (search past the '*')
	 *   "*foo*" -> MATCH_MIDDLE_ONLY
	 *   "foo*"  -> MATCH_FRONT_ONLY  ('*' replaced by '\0')
	 * Anything after the second '*' is ignored (loop breaks).
	 */
	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				*search = buff + 1;
				type = MATCH_END_ONLY;
			} else {
				if (type == MATCH_END_ONLY)
					type = MATCH_MIDDLE_ONLY;
				else
					type = MATCH_FRONT_ONLY;
				buff[i] = 0;	/* truncate at the '*' */
				break;
			}
		}
	}

	return type;
}
352
/*
 * Pick the match callback for a string predicate.  Only OP_GLOB patterns
 * are parsed for '*' wildcards; ==/!= compare the literal pattern
 * (MATCH_FULL).  filter_parse_regex() rewrites the pattern in place, so
 * the stripped text is moved back to the start of the buffer.
 */
static void filter_build_regex(struct filter_pred *pred)
{
	struct regex *r = &pred->regex;
	char *search;
	enum regex_type type = MATCH_FULL;
	int not = 0;

	if (pred->op == OP_GLOB) {
		type = filter_parse_regex(r->pattern, r->len, &search, &not);
		r->len = strlen(search);
		/* +1 copies the '\0' terminator too */
		memmove(r->pattern, search, r->len+1);
	}

	switch (type) {
	case MATCH_FULL:
		r->match = regex_match_full;
		break;
	case MATCH_FRONT_ONLY:
		r->match = regex_match_front;
		break;
	case MATCH_MIDDLE_ONLY:
		r->match = regex_match_middle;
		break;
	case MATCH_END_ONLY:
		r->match = regex_match_end;
		break;
	}

	/* a leading '!' in the pattern toggles the NOT flag */
	pred->not ^= not;
}
383
/* Direction of travel while walking the pred tree iteratively. */
enum move_type {
	MOVE_DOWN,		/* descending into a child */
	MOVE_UP_FROM_LEFT,	/* returning from the left subtree */
	MOVE_UP_FROM_RIGHT	/* returning from the right subtree */
};
389
390 static struct filter_pred *
391 get_pred_parent(struct filter_pred *pred, struct filter_pred *preds,
392 int index, enum move_type *move)
393 {
394 if (pred->parent & FILTER_PRED_IS_RIGHT)
395 *move = MOVE_UP_FROM_RIGHT;
396 else
397 *move = MOVE_UP_FROM_LEFT;
398 pred = &preds[pred->parent & ~FILTER_PRED_IS_RIGHT];
399
400 return pred;
401 }
402
/* Return values a walk callback can hand back to walk_pred_tree(). */
enum walk_return {
	WALK_PRED_ABORT,	/* stop the walk, return callback's *err */
	WALK_PRED_PARENT,	/* skip this subtree, go to the parent */
	WALK_PRED_DEFAULT,	/* continue the normal traversal */
};

/* Callback invoked at each visit; may set *err for WALK_PRED_ABORT. */
typedef int (*filter_pred_walkcb_t) (enum move_type move,
				     struct filter_pred *pred,
				     int *err, void *data);
412
/*
 * Iterative (non-recursive) walk of the pred tree rooted at @root.
 * @cb is called at every visit with the current direction of travel;
 * it may abort the walk or ask to jump straight to the parent.
 * Returns 0 on a complete walk, the callback's *err on abort, or
 * -EINVAL if @preds is NULL.
 */
static int walk_pred_tree(struct filter_pred *preds,
			  struct filter_pred *root,
			  filter_pred_walkcb_t cb, void *data)
{
	struct filter_pred *pred = root;
	enum move_type move = MOVE_DOWN;
	int done = 0;

	if  (!preds)
		return -EINVAL;

	do {
		int err = 0, ret;

		ret = cb(move, pred, &err, data);
		if (ret == WALK_PRED_ABORT)
			return err;
		if (ret == WALK_PRED_PARENT)
			goto get_parent;

		switch (move) {
		case MOVE_DOWN:
			/* leaves have left == FILTER_PRED_INVALID */
			if (pred->left != FILTER_PRED_INVALID) {
				pred = &preds[pred->left];
				continue;
			}
			goto get_parent;
		case MOVE_UP_FROM_LEFT:
			/* left subtree done; descend into the right one */
			pred = &preds[pred->right];
			move = MOVE_DOWN;
			continue;
		case MOVE_UP_FROM_RIGHT:
 get_parent:
			if (pred == root)
				break;	/* both subtrees of root done */
			pred = get_pred_parent(pred, preds,
					       pred->parent,
					       &move);
			continue;
		}
		done = 1;
	} while (!done);

	/* We are fine. */
	return 0;
}
459
460 /*
461 * A series of AND or ORs where found together. Instead of
462 * climbing up and down the tree branches, an array of the
463 * ops were made in order of checks. We can just move across
464 * the array and short circuit if needed.
465 */
/*
 * A series of AND or ORs where found together. Instead of
 * climbing up and down the tree branches, an array of the
 * ops were made in order of checks. We can just move across
 * the array and short circuit if needed.
 */
static int process_ops(struct filter_pred *preds,
		       struct filter_pred *op, void *rec)
{
	struct filter_pred *pred;
	int match = 0;
	int type;
	int i;

	/*
	 * Micro-optimization: We set type to true if op
	 * is an OR and false otherwise (AND). Then we
	 * just need to test if the match is equal to
	 * the type, and if it is, we can short circuit the
	 * rest of the checks:
	 *
	 *	if ((match && op->op == OP_OR) ||
	 *	    (!match && op->op == OP_AND))
	 *		return match;
	 */
	type = op->op == OP_OR;

	for (i = 0; i < op->val; i++) {
		/* op->ops[] holds the indexes of the folded children */
		pred = &preds[op->ops[i]];
		if (!WARN_ON_ONCE(!pred->fn))
			match = pred->fn(pred, rec);
		if (!!match == type)
			break;	/* short circuit */
	}
	/* Normalize to 0/1 and apply the op's NOT flag. */
	return !!match == !op->not;
}
497
/* Per-walk context handed to filter_match_preds_cb() via walk_pred_tree. */
struct filter_match_preds_data {
	struct filter_pred *preds;	/* flat pred array of the filter */
	int match;			/* running result of the evaluation */
	void *rec;			/* the event record being tested */
};
503
/*
 * walk_pred_tree() callback that evaluates the filter against d->rec.
 * Folded ops and leaves are evaluated directly and treated as leaf
 * nodes; short-circuiting happens when returning from a left child.
 */
static int filter_match_preds_cb(enum move_type move, struct filter_pred *pred,
				 int *err, void *data)
{
	struct filter_match_preds_data *d = data;

	*err = 0;
	switch (move) {
	case MOVE_DOWN:
		/* only AND and OR have children */
		if (pred->left != FILTER_PRED_INVALID) {
			/* If ops is set, then it was folded. */
			if (!pred->ops)
				return WALK_PRED_DEFAULT;
			/* We can treat folded ops as a leaf node */
			d->match = process_ops(d->preds, pred, d->rec);
		} else {
			if (!WARN_ON_ONCE(!pred->fn))
				d->match = pred->fn(pred, d->rec);
		}

		return WALK_PRED_PARENT;
	case MOVE_UP_FROM_LEFT:
		/*
		 * Check for short circuits.
		 *
		 * Optimization: !!match == (pred->op == OP_OR)
		 *   is the same as:
		 * if ((match && pred->op == OP_OR) ||
		 *     (!match && pred->op == OP_AND))
		 */
		if (!!d->match == (pred->op == OP_OR))
			return WALK_PRED_PARENT;
		break;
	case MOVE_UP_FROM_RIGHT:
		break;
	}

	return WALK_PRED_DEFAULT;
}
543
544 /* return 1 if event matches, 0 otherwise (discard) */
545 int filter_match_preds(struct event_filter *filter, void *rec)
546 {
547 struct filter_pred *preds;
548 struct filter_pred *root;
549 struct filter_match_preds_data data = {
550 /* match is currently meaningless */
551 .match = -1,
552 .rec = rec,
553 };
554 int n_preds, ret;
555
556 /* no filter is considered a match */
557 if (!filter)
558 return 1;
559
560 n_preds = filter->n_preds;
561 if (!n_preds)
562 return 1;
563
564 /*
565 * n_preds, root and filter->preds are protect with preemption disabled.
566 */
567 root = rcu_dereference_sched(filter->root);
568 if (!root)
569 return 1;
570
571 data.preds = preds = rcu_dereference_sched(filter->preds);
572 ret = walk_pred_tree(preds, root, filter_match_preds_cb, &data);
573 WARN_ON(ret);
574 return data.match;
575 }
576 EXPORT_SYMBOL_GPL(filter_match_preds);
577
/* Record the last parse error and its position for append_filter_err(). */
static void parse_error(struct filter_parse_state *ps, int err, int pos)
{
	ps->lasterr = err;
	ps->lasterr_pos = pos;
}
583
584 static void remove_filter_string(struct event_filter *filter)
585 {
586 if (!filter)
587 return;
588
589 kfree(filter->filter_string);
590 filter->filter_string = NULL;
591 }
592
593 static int replace_filter_string(struct event_filter *filter,
594 char *filter_string)
595 {
596 kfree(filter->filter_string);
597 filter->filter_string = kstrdup(filter_string, GFP_KERNEL);
598 if (!filter->filter_string)
599 return -ENOMEM;
600
601 return 0;
602 }
603
604 static int append_filter_string(struct event_filter *filter,
605 char *string)
606 {
607 int newlen;
608 char *new_filter_string;
609
610 BUG_ON(!filter->filter_string);
611 newlen = strlen(filter->filter_string) + strlen(string) + 1;
612 new_filter_string = kmalloc(newlen, GFP_KERNEL);
613 if (!new_filter_string)
614 return -ENOMEM;
615
616 strcpy(new_filter_string, filter->filter_string);
617 strcat(new_filter_string, string);
618 kfree(filter->filter_string);
619 filter->filter_string = new_filter_string;
620
621 return 0;
622 }
623
/*
 * Append a human-readable error to the filter string:
 * a line of spaces with '^' under the offending position, followed by
 * "parse_error: <message>".  Uses a whole page as scratch; if the error
 * position would not leave room for the message, the '^' is moved to
 * column 0.  Allocation failure silently skips the annotation.
 */
static void append_filter_err(struct filter_parse_state *ps,
			      struct event_filter *filter)
{
	int pos = ps->lasterr_pos;
	char *buf, *pbuf;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return;

	append_filter_string(filter, "\n");
	memset(buf, ' ', PAGE_SIZE);
	/* keep at least 128 bytes after '^' for the message text */
	if (pos > PAGE_SIZE - 128)
		pos = 0;
	buf[pos] = '^';
	pbuf = &buf[pos] + 1;

	/* sprintf's '\0' terminates the page-sized buffer for append */
	sprintf(pbuf, "\nparse_error: %s\n", err_text[ps->lasterr]);
	append_filter_string(filter, buf);
	free_page((unsigned long) buf);
}
645
646 static inline struct event_filter *event_filter(struct ftrace_event_file *file)
647 {
648 if (file->event_call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
649 return file->event_call->filter;
650 else
651 return file->filter;
652 }
653
654 /* caller must hold event_mutex */
655 void print_event_filter(struct ftrace_event_file *file, struct trace_seq *s)
656 {
657 struct event_filter *filter = event_filter(file);
658
659 if (filter && filter->filter_string)
660 trace_seq_printf(s, "%s\n", filter->filter_string);
661 else
662 trace_seq_puts(s, "none\n");
663 }
664
665 void print_subsystem_event_filter(struct event_subsystem *system,
666 struct trace_seq *s)
667 {
668 struct event_filter *filter;
669
670 mutex_lock(&event_mutex);
671 filter = system->filter;
672 if (filter && filter->filter_string)
673 trace_seq_printf(s, "%s\n", filter->filter_string);
674 else
675 trace_seq_puts(s, DEFAULT_SYS_FILTER_MESSAGE "\n");
676 mutex_unlock(&event_mutex);
677 }
678
/*
 * Allocate a pred stack able to hold @n_preds entries.  One extra,
 * zeroed slot is allocated so __pop_pred_stack() can use a NULL entry
 * as the empty-stack sentinel.  index starts at n_preds and the stack
 * grows downward.
 */
static int __alloc_pred_stack(struct pred_stack *stack, int n_preds)
{
	stack->preds = kcalloc(n_preds + 1, sizeof(*stack->preds), GFP_KERNEL);
	if (!stack->preds)
		return -ENOMEM;
	stack->index = n_preds;
	return 0;
}
687
/* Release the pred stack's storage and reset it to an unusable state. */
static void __free_pred_stack(struct pred_stack *stack)
{
	kfree(stack->preds);
	stack->index = 0;
}
693
694 static int __push_pred_stack(struct pred_stack *stack,
695 struct filter_pred *pred)
696 {
697 int index = stack->index;
698
699 if (WARN_ON(index == 0))
700 return -ENOSPC;
701
702 stack->preds[--index] = pred;
703 stack->index = index;
704 return 0;
705 }
706
/*
 * Pop the top pred.  Relies on the extra zeroed slot allocated by
 * __alloc_pred_stack(): reading a NULL entry means the stack is empty,
 * in which case index is left unchanged and NULL is returned.
 */
static struct filter_pred *
__pop_pred_stack(struct pred_stack *stack)
{
	struct filter_pred *pred;
	int index = stack->index;

	pred = stack->preds[index++];
	if (!pred)
		return NULL;

	stack->index = index;
	return pred;
}
720
/*
 * Copy @src into slot @idx of filter->preds and wire it into the tree
 * being built bottom-up from the postfix expression: leaves are pushed
 * on @stack, logical ops pop their two children.  The FILTER_PRED_FOLD
 * bit is carried in ->index while building and stripped when stored in
 * the child/parent links.  Returns 0, -EINVAL on stack underflow
 * (malformed expression), or the error from the final push.
 */
static int filter_set_pred(struct event_filter *filter,
			   int idx,
			   struct pred_stack *stack,
			   struct filter_pred *src)
{
	struct filter_pred *dest = &filter->preds[idx];
	struct filter_pred *left;
	struct filter_pred *right;

	*dest = *src;
	dest->index = idx;

	if (dest->op == OP_OR || dest->op == OP_AND) {
		/* postfix order: right child was pushed last */
		right = __pop_pred_stack(stack);
		left = __pop_pred_stack(stack);
		if (!left || !right)
			return -EINVAL;
		/*
		 * If both children can be folded
		 * and they are the same op as this op or a leaf,
		 * then this op can be folded.
		 */
		if (left->index & FILTER_PRED_FOLD &&
		    ((left->op == dest->op && !left->not) ||
		     left->left == FILTER_PRED_INVALID) &&
		    right->index & FILTER_PRED_FOLD &&
		    ((right->op == dest->op && !right->not) ||
		     right->left == FILTER_PRED_INVALID))
			dest->index |= FILTER_PRED_FOLD;

		dest->left = left->index & ~FILTER_PRED_FOLD;
		dest->right = right->index & ~FILTER_PRED_FOLD;
		left->parent = dest->index & ~FILTER_PRED_FOLD;
		/* IS_RIGHT bit tells get_pred_parent() which child we were */
		right->parent = dest->index | FILTER_PRED_IS_RIGHT;
	} else {
		/*
		 * Make dest->left invalid to be used as a quick
		 * way to know this is a leaf node.
		 */
		dest->left = FILTER_PRED_INVALID;

		/* All leafs allow folding the parent ops. */
		dest->index |= FILTER_PRED_FOLD;
	}

	return __push_pred_stack(stack, dest);
}
768
769 static void __free_preds(struct event_filter *filter)
770 {
771 int i;
772
773 if (filter->preds) {
774 for (i = 0; i < filter->n_preds; i++)
775 kfree(filter->preds[i].ops);
776 kfree(filter->preds);
777 filter->preds = NULL;
778 }
779 filter->a_preds = 0;
780 filter->n_preds = 0;
781 }
782
783 static void filter_disable(struct ftrace_event_file *file)
784 {
785 struct ftrace_event_call *call = file->event_call;
786
787 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
788 call->flags &= ~TRACE_EVENT_FL_FILTERED;
789 else
790 file->flags &= ~FTRACE_EVENT_FL_FILTERED;
791 }
792
793 static void __free_filter(struct event_filter *filter)
794 {
795 if (!filter)
796 return;
797
798 __free_preds(filter);
799 kfree(filter->filter_string);
800 kfree(filter);
801 }
802
/* Public wrapper around __free_filter() for code outside this file. */
void free_event_filter(struct event_filter *filter)
{
	__free_filter(filter);
}
807
808 static struct event_filter *__alloc_filter(void)
809 {
810 struct event_filter *filter;
811
812 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
813 return filter;
814 }
815
816 static int __alloc_preds(struct event_filter *filter, int n_preds)
817 {
818 struct filter_pred *pred;
819 int i;
820
821 if (filter->preds)
822 __free_preds(filter);
823
824 filter->preds = kcalloc(n_preds, sizeof(*filter->preds), GFP_KERNEL);
825
826 if (!filter->preds)
827 return -ENOMEM;
828
829 filter->a_preds = n_preds;
830 filter->n_preds = 0;
831
832 for (i = 0; i < n_preds; i++) {
833 pred = &filter->preds[i];
834 pred->fn = filter_pred_none;
835 }
836
837 return 0;
838 }
839
840 static inline void __remove_filter(struct ftrace_event_file *file)
841 {
842 struct ftrace_event_call *call = file->event_call;
843
844 filter_disable(file);
845 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
846 remove_filter_string(call->filter);
847 else
848 remove_filter_string(file->filter);
849 }
850
851 static void filter_free_subsystem_preds(struct ftrace_subsystem_dir *dir,
852 struct trace_array *tr)
853 {
854 struct ftrace_event_file *file;
855
856 list_for_each_entry(file, &tr->events, list) {
857 if (file->system != dir)
858 continue;
859 __remove_filter(file);
860 }
861 }
862
863 static inline void __free_subsystem_filter(struct ftrace_event_file *file)
864 {
865 struct ftrace_event_call *call = file->event_call;
866
867 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) {
868 __free_filter(call->filter);
869 call->filter = NULL;
870 } else {
871 __free_filter(file->filter);
872 file->filter = NULL;
873 }
874 }
875
876 static void filter_free_subsystem_filters(struct ftrace_subsystem_dir *dir,
877 struct trace_array *tr)
878 {
879 struct ftrace_event_file *file;
880
881 list_for_each_entry(file, &tr->events, list) {
882 if (file->system != dir)
883 continue;
884 __free_subsystem_filter(file);
885 }
886 }
887
888 static int filter_add_pred(struct filter_parse_state *ps,
889 struct event_filter *filter,
890 struct filter_pred *pred,
891 struct pred_stack *stack)
892 {
893 int err;
894
895 if (WARN_ON(filter->n_preds == filter->a_preds)) {
896 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
897 return -ENOSPC;
898 }
899
900 err = filter_set_pred(filter, filter->n_preds, stack, pred);
901 if (err)
902 return err;
903
904 filter->n_preds++;
905
906 return 0;
907 }
908
909 int filter_assign_type(const char *type)
910 {
911 if (strstr(type, "__data_loc") && strstr(type, "char"))
912 return FILTER_DYN_STRING;
913
914 if (strchr(type, '[') && strstr(type, "char"))
915 return FILTER_STATIC_STRING;
916
917 return FILTER_OTHER;
918 }
919
/* True for the special function-trace field type (only "ip" is usable). */
static bool is_function_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_TRACE_FN;
}
924
925 static bool is_string_field(struct ftrace_event_field *field)
926 {
927 return field->filter_type == FILTER_DYN_STRING ||
928 field->filter_type == FILTER_STATIC_STRING ||
929 field->filter_type == FILTER_PTR_STRING;
930 }
931
932 static int is_legal_op(struct ftrace_event_field *field, int op)
933 {
934 if (is_string_field(field) &&
935 (op != OP_EQ && op != OP_NE && op != OP_GLOB))
936 return 0;
937 if (!is_string_field(field) && op == OP_GLOB)
938 return 0;
939
940 return 1;
941 }
942
943 static filter_pred_fn_t select_comparison_fn(int op, int field_size,
944 int field_is_signed)
945 {
946 filter_pred_fn_t fn = NULL;
947
948 switch (field_size) {
949 case 8:
950 if (op == OP_EQ || op == OP_NE)
951 fn = filter_pred_64;
952 else if (field_is_signed)
953 fn = filter_pred_s64;
954 else
955 fn = filter_pred_u64;
956 break;
957 case 4:
958 if (op == OP_EQ || op == OP_NE)
959 fn = filter_pred_32;
960 else if (field_is_signed)
961 fn = filter_pred_s32;
962 else
963 fn = filter_pred_u32;
964 break;
965 case 2:
966 if (op == OP_EQ || op == OP_NE)
967 fn = filter_pred_16;
968 else if (field_is_signed)
969 fn = filter_pred_s16;
970 else
971 fn = filter_pred_u16;
972 break;
973 case 1:
974 if (op == OP_EQ || op == OP_NE)
975 fn = filter_pred_8;
976 else if (field_is_signed)
977 fn = filter_pred_s8;
978 else
979 fn = filter_pred_u8;
980 break;
981 }
982
983 return fn;
984 }
985
/*
 * Finish setting up a leaf predicate for @field: validate the operator,
 * choose the match callback (string matcher, numeric comparator, or
 * nothing for the function-trace "ip" field), and parse the numeric
 * value for non-string fields.  Parse errors are recorded in @ps and
 * -EINVAL returned.
 */
static int init_pred(struct filter_parse_state *ps,
		     struct ftrace_event_field *field,
		     struct filter_pred *pred)

{
	filter_pred_fn_t fn = filter_pred_none;
	unsigned long long val;
	int ret;

	pred->offset = field->offset;

	if (!is_legal_op(field, pred->op)) {
		parse_error(ps, FILT_ERR_ILLEGAL_FIELD_OP, 0);
		return -EINVAL;
	}

	if (is_string_field(field)) {
		filter_build_regex(pred);

		if (field->filter_type == FILTER_STATIC_STRING) {
			fn = filter_pred_string;
			pred->regex.field_len = field->size;
		} else if (field->filter_type == FILTER_DYN_STRING)
			fn = filter_pred_strloc;
		else
			fn = filter_pred_pchar;
	} else if (is_function_field(field)) {
		/* only the "ip" field can be filtered on function events */
		if (strcmp(field->name, "ip")) {
			parse_error(ps, FILT_ERR_IP_FIELD_ONLY, 0);
			return -EINVAL;
		}
	} else {
		/* numeric field: parse the operand as an integer */
		if (field->is_signed)
			ret = kstrtoll(pred->regex.pattern, 0, &val);
		else
			ret = kstrtoull(pred->regex.pattern, 0, &val);
		if (ret) {
			parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
			return -EINVAL;
		}
		pred->val = val;

		fn = select_comparison_fn(pred->op, field->size,
					  field->is_signed);
		if (!fn) {
			parse_error(ps, FILT_ERR_INVALID_OP, 0);
			return -EINVAL;
		}
	}

	/* != is implemented as the negation of == */
	if (pred->op == OP_NE)
		pred->not ^= 1;

	pred->fn = fn;
	return 0;
}
1042
1043 static void parse_init(struct filter_parse_state *ps,
1044 struct filter_op *ops,
1045 char *infix_string)
1046 {
1047 memset(ps, '\0', sizeof(*ps));
1048
1049 ps->infix.string = infix_string;
1050 ps->infix.cnt = strlen(infix_string);
1051 ps->ops = ops;
1052
1053 INIT_LIST_HEAD(&ps->opstack);
1054 INIT_LIST_HEAD(&ps->postfix);
1055 }
1056
/* Consume and return the next input character, decrementing the
 * remaining-character count (cnt underflows past the end; callers stop
 * at the '\0' returned from the final position).
 */
static char infix_next(struct filter_parse_state *ps)
{
	ps->infix.cnt--;

	return ps->infix.string[ps->infix.tail++];
}
1063
1064 static char infix_peek(struct filter_parse_state *ps)
1065 {
1066 if (ps->infix.tail == strlen(ps->infix.string))
1067 return 0;
1068
1069 return ps->infix.string[ps->infix.tail];
1070 }
1071
/* Skip one input character (used after infix_peek() matched it). */
static void infix_advance(struct filter_parse_state *ps)
{
	ps->infix.cnt--;
	ps->infix.tail++;
}
1077
/* True if operator @a binds less tightly than operator @b. */
static inline int is_precedence_lower(struct filter_parse_state *ps,
				      int a, int b)
{
	return ps->ops[a].precedence < ps->ops[b].precedence;
}
1083
1084 static inline int is_op_char(struct filter_parse_state *ps, char c)
1085 {
1086 int i;
1087
1088 for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1089 if (ps->ops[i].string[0] == c)
1090 return 1;
1091 }
1092
1093 return 0;
1094 }
1095
1096 static int infix_get_op(struct filter_parse_state *ps, char firstc)
1097 {
1098 char nextc = infix_peek(ps);
1099 char opstr[3];
1100 int i;
1101
1102 opstr[0] = firstc;
1103 opstr[1] = nextc;
1104 opstr[2] = '\0';
1105
1106 for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1107 if (!strcmp(opstr, ps->ops[i].string)) {
1108 infix_advance(ps);
1109 return ps->ops[i].id;
1110 }
1111 }
1112
1113 opstr[1] = '\0';
1114
1115 for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1116 if (!strcmp(opstr, ps->ops[i].string))
1117 return ps->ops[i].id;
1118 }
1119
1120 return OP_NONE;
1121 }
1122
/* Reset the operand accumulation buffer for the next token. */
static inline void clear_operand_string(struct filter_parse_state *ps)
{
	memset(ps->operand.string, '\0', MAX_FILTER_STR_VAL);
	ps->operand.tail = 0;
}
1128
1129 static inline int append_operand_char(struct filter_parse_state *ps, char c)
1130 {
1131 if (ps->operand.tail == MAX_FILTER_STR_VAL - 1)
1132 return -EINVAL;
1133
1134 ps->operand.string[ps->operand.tail++] = c;
1135
1136 return 0;
1137 }
1138
1139 static int filter_opstack_push(struct filter_parse_state *ps, int op)
1140 {
1141 struct opstack_op *opstack_op;
1142
1143 opstack_op = kmalloc(sizeof(*opstack_op), GFP_KERNEL);
1144 if (!opstack_op)
1145 return -ENOMEM;
1146
1147 opstack_op->op = op;
1148 list_add(&opstack_op->list, &ps->opstack);
1149
1150 return 0;
1151 }
1152
/* True if no operators are pending on the stack. */
static int filter_opstack_empty(struct filter_parse_state *ps)
{
	return list_empty(&ps->opstack);
}
1157
1158 static int filter_opstack_top(struct filter_parse_state *ps)
1159 {
1160 struct opstack_op *opstack_op;
1161
1162 if (filter_opstack_empty(ps))
1163 return OP_NONE;
1164
1165 opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
1166
1167 return opstack_op->op;
1168 }
1169
1170 static int filter_opstack_pop(struct filter_parse_state *ps)
1171 {
1172 struct opstack_op *opstack_op;
1173 int op;
1174
1175 if (filter_opstack_empty(ps))
1176 return OP_NONE;
1177
1178 opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
1179 op = opstack_op->op;
1180 list_del(&opstack_op->list);
1181
1182 kfree(opstack_op);
1183
1184 return op;
1185 }
1186
/* Drain and free the whole operator stack (used on error paths). */
static void filter_opstack_clear(struct filter_parse_state *ps)
{
	while (!filter_opstack_empty(ps))
		filter_opstack_pop(ps);
}
1192
/* The operand accumulated so far (empty string if none). */
static char *curr_operand(struct filter_parse_state *ps)
{
	return ps->operand.string;
}
1197
1198 static int postfix_append_operand(struct filter_parse_state *ps, char *operand)
1199 {
1200 struct postfix_elt *elt;
1201
1202 elt = kmalloc(sizeof(*elt), GFP_KERNEL);
1203 if (!elt)
1204 return -ENOMEM;
1205
1206 elt->op = OP_NONE;
1207 elt->operand = kstrdup(operand, GFP_KERNEL);
1208 if (!elt->operand) {
1209 kfree(elt);
1210 return -ENOMEM;
1211 }
1212
1213 list_add_tail(&elt->list, &ps->postfix);
1214
1215 return 0;
1216 }
1217
1218 static int postfix_append_op(struct filter_parse_state *ps, int op)
1219 {
1220 struct postfix_elt *elt;
1221
1222 elt = kmalloc(sizeof(*elt), GFP_KERNEL);
1223 if (!elt)
1224 return -ENOMEM;
1225
1226 elt->op = op;
1227 elt->operand = NULL;
1228
1229 list_add_tail(&elt->list, &ps->postfix);
1230
1231 return 0;
1232 }
1233
1234 static void postfix_clear(struct filter_parse_state *ps)
1235 {
1236 struct postfix_elt *elt;
1237
1238 while (!list_empty(&ps->postfix)) {
1239 elt = list_first_entry(&ps->postfix, struct postfix_elt, list);
1240 list_del(&elt->list);
1241 kfree(elt->operand);
1242 kfree(elt);
1243 }
1244 }
1245
/*
 * Shunting-yard conversion of the infix filter string into the postfix
 * list in ps->postfix.  Double quotes toggle string mode, inside which
 * everything (including spaces and operator characters) is part of the
 * operand; the quotes themselves are dropped.  Returns 0 on success or
 * -EINVAL with the error recorded in @ps.
 */
static int filter_parse(struct filter_parse_state *ps)
{
	int in_string = 0;
	int op, top_op;
	char ch;

	while ((ch = infix_next(ps))) {
		if (ch == '"') {
			in_string ^= 1;
			continue;
		}

		if (in_string)
			goto parse_operand;

		if (isspace(ch))
			continue;

		if (is_op_char(ps, ch)) {
			op = infix_get_op(ps, ch);
			if (op == OP_NONE) {
				parse_error(ps, FILT_ERR_INVALID_OP, 0);
				return -EINVAL;
			}

			/* an operator terminates the pending operand */
			if (strlen(curr_operand(ps))) {
				postfix_append_operand(ps, curr_operand(ps));
				clear_operand_string(ps);
			}

			/*
			 * Pop every stacked operator of higher-or-equal
			 * precedence to the output before pushing this one.
			 */
			while (!filter_opstack_empty(ps)) {
				top_op = filter_opstack_top(ps);
				if (!is_precedence_lower(ps, top_op, op)) {
					top_op = filter_opstack_pop(ps);
					postfix_append_op(ps, top_op);
					continue;
				}
				break;
			}

			filter_opstack_push(ps, op);
			continue;
		}

		if (ch == '(') {
			filter_opstack_push(ps, OP_OPEN_PAREN);
			continue;
		}

		if (ch == ')') {
			if (strlen(curr_operand(ps))) {
				postfix_append_operand(ps, curr_operand(ps));
				clear_operand_string(ps);
			}

			/* flush operators back to the matching '(' */
			top_op = filter_opstack_pop(ps);
			while (top_op != OP_NONE) {
				if (top_op == OP_OPEN_PAREN)
					break;
				postfix_append_op(ps, top_op);
				top_op = filter_opstack_pop(ps);
			}
			if (top_op == OP_NONE) {
				parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
				return -EINVAL;
			}
			continue;
		}
parse_operand:
		if (append_operand_char(ps, ch)) {
			parse_error(ps, FILT_ERR_OPERAND_TOO_LONG, 0);
			return -EINVAL;
		}
	}

	/* flush the final operand and any remaining operators */
	if (strlen(curr_operand(ps)))
		postfix_append_operand(ps, curr_operand(ps));

	while (!filter_opstack_empty(ps)) {
		top_op = filter_opstack_pop(ps);
		if (top_op == OP_NONE)
			break;
		if (top_op == OP_OPEN_PAREN) {
			/* a '(' left on the stack was never closed */
			parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
			return -EINVAL;
		}
		postfix_append_op(ps, top_op);
	}

	return 0;
}
1337
/*
 * Build a filter_pred for one postfix element of @call's filter.
 *
 * Returns a pointer to a function-local *static* filter_pred: the
 * caller (replace_preds()) consumes it immediately via
 * filter_add_pred(), and filter updates are serialized -- NOTE(review):
 * presumably by event_mutex; verify no unlocked caller exists, since a
 * static buffer is not reentrant.
 *
 * For the pure logical ops OP_AND/OP_OR no field lookup is needed and
 * the bare pred is returned.  Otherwise @operand1 must name a field of
 * @call and @operand2 is the value/pattern to compare against.  The
 * operand length is capped by the parser (FILT_ERR_OPERAND_TOO_LONG),
 * which is what keeps the strcpy() into the fixed-size pattern buffer
 * in bounds -- assumes the parser limit <= sizeof(pred.regex.pattern);
 * TODO confirm against MAX_FILTER_STR_VAL.
 *
 * Returns NULL on error (recorded via parse_error()).
 */
static struct filter_pred *create_pred(struct filter_parse_state *ps,
				       struct ftrace_event_call *call,
				       int op, char *operand1, char *operand2)
{
	struct ftrace_event_field *field;
	static struct filter_pred pred;

	memset(&pred, 0, sizeof(pred));
	pred.op = op;

	if (op == OP_AND || op == OP_OR)
		return &pred;

	if (!operand1 || !operand2) {
		parse_error(ps, FILT_ERR_MISSING_FIELD, 0);
		return NULL;
	}

	field = trace_find_event_field(call, operand1);
	if (!field) {
		parse_error(ps, FILT_ERR_FIELD_NOT_FOUND, 0);
		return NULL;
	}

	strcpy(pred.regex.pattern, operand2);
	pred.regex.len = strlen(pred.regex.pattern);
	pred.field = field;
	/* init_pred() validates the operator against the field type */
	return init_pred(ps, field, &pred) ? NULL : &pred;
}
1367
1368 static int check_preds(struct filter_parse_state *ps)
1369 {
1370 int n_normal_preds = 0, n_logical_preds = 0;
1371 struct postfix_elt *elt;
1372
1373 list_for_each_entry(elt, &ps->postfix, list) {
1374 if (elt->op == OP_NONE)
1375 continue;
1376
1377 if (elt->op == OP_AND || elt->op == OP_OR) {
1378 n_logical_preds++;
1379 continue;
1380 }
1381 n_normal_preds++;
1382 }
1383
1384 if (!n_normal_preds || n_logical_preds >= n_normal_preds) {
1385 parse_error(ps, FILT_ERR_INVALID_FILTER, 0);
1386 return -EINVAL;
1387 }
1388
1389 return 0;
1390 }
1391
1392 static int count_preds(struct filter_parse_state *ps)
1393 {
1394 struct postfix_elt *elt;
1395 int n_preds = 0;
1396
1397 list_for_each_entry(elt, &ps->postfix, list) {
1398 if (elt->op == OP_NONE)
1399 continue;
1400 n_preds++;
1401 }
1402
1403 return n_preds;
1404 }
1405
/* Walk bookkeeping for check_pred_tree(): count of nodes visited vs. max allowed. */
struct check_pred_data {
	int count;	/* nodes visited so far */
	int max;	/* upper bound before the walk is declared broken */
};
1410
/*
 * walk_pred_tree() callback: count every visit and abort with -EINVAL
 * if the visit count exceeds the precomputed maximum, which would mean
 * the tree has a cycle or is otherwise malformed.
 */
static int check_pred_tree_cb(enum move_type move, struct filter_pred *pred,
			      int *err, void *data)
{
	struct check_pred_data *d = data;

	if (WARN_ON(d->count++ > d->max)) {
		*err = -EINVAL;
		return WALK_PRED_ABORT;
	}
	return WALK_PRED_DEFAULT;
}
1422
/*
 * The tree is walked at filtering of an event. If the tree is not correctly
 * built, it may cause an infinite loop. Check here that the tree does
 * indeed terminate.
 *
 * Returns 0 if the walk terminated within bounds, -EINVAL otherwise.
 */
static int check_pred_tree(struct event_filter *filter,
			   struct filter_pred *root)
{
	struct check_pred_data data = {
		/*
		 * The max that we can hit a node is three times.
		 * Once going down, once coming up from left, and
		 * once coming up from right. This is more than enough
		 * since leafs are only hit a single time.
		 */
		.max = 3 * filter->n_preds,
		.count = 0,
	};

	return walk_pred_tree(filter->preds, root,
			      check_pred_tree_cb, &data);
}
1445
1446 static int count_leafs_cb(enum move_type move, struct filter_pred *pred,
1447 int *err, void *data)
1448 {
1449 int *count = data;
1450
1451 if ((move == MOVE_DOWN) &&
1452 (pred->left == FILTER_PRED_INVALID))
1453 (*count)++;
1454
1455 return WALK_PRED_DEFAULT;
1456 }
1457
1458 static int count_leafs(struct filter_pred *preds, struct filter_pred *root)
1459 {
1460 int count = 0, ret;
1461
1462 ret = walk_pred_tree(preds, root, count_leafs_cb, &count);
1463 WARN_ON(ret);
1464 return count;
1465 }
1466
/* Walk bookkeeping for fold_pred(): where to record leaf indices. */
struct fold_pred_data {
	struct filter_pred *root;	/* node whose ops[] array is being filled */
	int count;			/* next free slot in root->ops[] */
	int children;			/* capacity of root->ops[] */
};
1472
/*
 * walk_pred_tree() callback for fold_pred(): record the index of every
 * leaf below d->root into root->ops[], clearing the leaf's FOLD flag.
 * Aborts with -EINVAL if more leafs appear than were counted up front.
 */
static int fold_pred_cb(enum move_type move, struct filter_pred *pred,
			int *err, void *data)
{
	struct fold_pred_data *d = data;
	struct filter_pred *root = d->root;

	if (move != MOVE_DOWN)
		return WALK_PRED_DEFAULT;
	/* only leaves are collected */
	if (pred->left != FILTER_PRED_INVALID)
		return WALK_PRED_DEFAULT;

	if (WARN_ON(d->count == d->children)) {
		*err = -EINVAL;
		return WALK_PRED_ABORT;
	}

	pred->index &= ~FILTER_PRED_FOLD;
	root->ops[d->count++] = pred->index;
	return WALK_PRED_DEFAULT;
}
1493
/*
 * Fold @root: gather the indices of all leaf preds below it into a
 * freshly allocated root->ops[] array, so a run of same-operator
 * predicates can be evaluated as a flat loop at event time instead of
 * walking the tree.  Returns 0 or a negative errno.
 */
static int fold_pred(struct filter_pred *preds, struct filter_pred *root)
{
	struct fold_pred_data data = {
		.root  = root,
		.count = 0,
	};
	int children;

	/* No need to keep the fold flag */
	root->index &= ~FILTER_PRED_FOLD;

	/* If the root is a leaf then do nothing */
	if (root->left == FILTER_PRED_INVALID)
		return 0;

	/* count the children */
	children = count_leafs(preds, &preds[root->left]);
	children += count_leafs(preds, &preds[root->right]);

	root->ops = kcalloc(children, sizeof(*root->ops), GFP_KERNEL);
	if (!root->ops)
		return -ENOMEM;

	/* val doubles as the length of ops[] for a folded node */
	root->val = children;
	data.children = children;
	return walk_pred_tree(preds, root, fold_pred_cb, &data);
}
1521
/*
 * walk_pred_tree() callback for fold_pred_tree(): fold every subtree
 * whose root carries FILTER_PRED_FOLD, then skip its interior since the
 * fold already covered it.
 */
static int fold_pred_tree_cb(enum move_type move, struct filter_pred *pred,
			     int *err, void *data)
{
	struct filter_pred *preds = data;

	if (move != MOVE_DOWN)
		return WALK_PRED_DEFAULT;
	if (!(pred->index & FILTER_PRED_FOLD))
		return WALK_PRED_DEFAULT;

	*err = fold_pred(preds, pred);
	if (*err)
		return WALK_PRED_ABORT;

	/* everything below is folded, continue with parent */
	return WALK_PRED_PARENT;
}
1539
/*
 * To optimize the processing of the ops, if we have several "ors" or
 * "ands" together, we can put them in an array and process them all
 * together speeding up the filter logic.
 *
 * Returns 0 on success or a negative errno from fold_pred().
 */
static int fold_pred_tree(struct event_filter *filter,
			  struct filter_pred *root)
{
	return walk_pred_tree(filter->preds, root, fold_pred_tree_cb,
			      filter->preds);
}
1551
1552 static int replace_preds(struct ftrace_event_call *call,
1553 struct event_filter *filter,
1554 struct filter_parse_state *ps,
1555 bool dry_run)
1556 {
1557 char *operand1 = NULL, *operand2 = NULL;
1558 struct filter_pred *pred;
1559 struct filter_pred *root;
1560 struct postfix_elt *elt;
1561 struct pred_stack stack = { }; /* init to NULL */
1562 int err;
1563 int n_preds = 0;
1564
1565 n_preds = count_preds(ps);
1566 if (n_preds >= MAX_FILTER_PRED) {
1567 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1568 return -ENOSPC;
1569 }
1570
1571 err = check_preds(ps);
1572 if (err)
1573 return err;
1574
1575 if (!dry_run) {
1576 err = __alloc_pred_stack(&stack, n_preds);
1577 if (err)
1578 return err;
1579 err = __alloc_preds(filter, n_preds);
1580 if (err)
1581 goto fail;
1582 }
1583
1584 n_preds = 0;
1585 list_for_each_entry(elt, &ps->postfix, list) {
1586 if (elt->op == OP_NONE) {
1587 if (!operand1)
1588 operand1 = elt->operand;
1589 else if (!operand2)
1590 operand2 = elt->operand;
1591 else {
1592 parse_error(ps, FILT_ERR_TOO_MANY_OPERANDS, 0);
1593 err = -EINVAL;
1594 goto fail;
1595 }
1596 continue;
1597 }
1598
1599 if (elt->op == OP_NOT) {
1600 if (!n_preds || operand1 || operand2) {
1601 parse_error(ps, FILT_ERR_ILLEGAL_NOT_OP, 0);
1602 err = -EINVAL;
1603 goto fail;
1604 }
1605 if (!dry_run)
1606 filter->preds[n_preds - 1].not ^= 1;
1607 continue;
1608 }
1609
1610 if (WARN_ON(n_preds++ == MAX_FILTER_PRED)) {
1611 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1612 err = -ENOSPC;
1613 goto fail;
1614 }
1615
1616 pred = create_pred(ps, call, elt->op, operand1, operand2);
1617 if (!pred) {
1618 err = -EINVAL;
1619 goto fail;
1620 }
1621
1622 if (!dry_run) {
1623 err = filter_add_pred(ps, filter, pred, &stack);
1624 if (err)
1625 goto fail;
1626 }
1627
1628 operand1 = operand2 = NULL;
1629 }
1630
1631 if (!dry_run) {
1632 /* We should have one item left on the stack */
1633 pred = __pop_pred_stack(&stack);
1634 if (!pred)
1635 return -EINVAL;
1636 /* This item is where we start from in matching */
1637 root = pred;
1638 /* Make sure the stack is empty */
1639 pred = __pop_pred_stack(&stack);
1640 if (WARN_ON(pred)) {
1641 err = -EINVAL;
1642 filter->root = NULL;
1643 goto fail;
1644 }
1645 err = check_pred_tree(filter, root);
1646 if (err)
1647 goto fail;
1648
1649 /* Optimize the tree */
1650 err = fold_pred_tree(filter, root);
1651 if (err)
1652 goto fail;
1653
1654 /* We don't set root until we know it works */
1655 barrier();
1656 filter->root = root;
1657 }
1658
1659 err = 0;
1660 fail:
1661 __free_pred_stack(&stack);
1662 return err;
1663 }
1664
1665 static inline void event_set_filtered_flag(struct ftrace_event_file *file)
1666 {
1667 struct ftrace_event_call *call = file->event_call;
1668
1669 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
1670 call->flags |= TRACE_EVENT_FL_FILTERED;
1671 else
1672 file->flags |= FTRACE_EVENT_FL_FILTERED;
1673 }
1674
1675 static inline void event_set_filter(struct ftrace_event_file *file,
1676 struct event_filter *filter)
1677 {
1678 struct ftrace_event_call *call = file->event_call;
1679
1680 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
1681 rcu_assign_pointer(call->filter, filter);
1682 else
1683 rcu_assign_pointer(file->filter, filter);
1684 }
1685
1686 static inline void event_clear_filter(struct ftrace_event_file *file)
1687 {
1688 struct ftrace_event_call *call = file->event_call;
1689
1690 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
1691 RCU_INIT_POINTER(call->filter, NULL);
1692 else
1693 RCU_INIT_POINTER(file->filter, NULL);
1694 }
1695
1696 static inline void
1697 event_set_no_set_filter_flag(struct ftrace_event_file *file)
1698 {
1699 struct ftrace_event_call *call = file->event_call;
1700
1701 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
1702 call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
1703 else
1704 file->flags |= FTRACE_EVENT_FL_NO_SET_FILTER;
1705 }
1706
1707 static inline void
1708 event_clear_no_set_filter_flag(struct ftrace_event_file *file)
1709 {
1710 struct ftrace_event_call *call = file->event_call;
1711
1712 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
1713 call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
1714 else
1715 file->flags &= ~FTRACE_EVENT_FL_NO_SET_FILTER;
1716 }
1717
1718 static inline bool
1719 event_no_set_filter_flag(struct ftrace_event_file *file)
1720 {
1721 struct ftrace_event_call *call = file->event_call;
1722
1723 if (file->flags & FTRACE_EVENT_FL_NO_SET_FILTER)
1724 return true;
1725
1726 if ((call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) &&
1727 (call->flags & TRACE_EVENT_FL_NO_SET_FILTER))
1728 return true;
1729
1730 return false;
1731 }
1732
/* Node for the local list of filters replace_system_preds() installs/frees. */
struct filter_list {
	struct list_head	list;
	struct event_filter	*filter;
};
1737
/*
 * Apply @filter_string to every event belonging to subsystem @dir in
 * trace array @tr.
 *
 * Pass 1 dry-runs the parsed program against each event so events
 * whose fields don't match the filter are flagged and skipped.  Pass 2
 * builds a real filter per remaining event and swaps it in; the old
 * filters are parked on a local list and freed only after
 * synchronize_sched(), since active tracing may still be walking them.
 *
 * Returns 0 if at least one event accepted the filter, -EINVAL if none
 * did, or -ENOMEM on allocation failure.
 */
static int replace_system_preds(struct ftrace_subsystem_dir *dir,
				struct trace_array *tr,
				struct filter_parse_state *ps,
				char *filter_string)
{
	struct ftrace_event_file *file;
	struct filter_list *filter_item;
	struct filter_list *tmp;
	LIST_HEAD(filter_list);
	bool fail = true;
	int err;

	list_for_each_entry(file, &tr->events, list) {
		if (file->system != dir)
			continue;

		/*
		 * Try to see if the filter can be applied
		 * (filter arg is ignored on dry_run)
		 */
		err = replace_preds(file->event_call, NULL, ps, true);
		if (err)
			event_set_no_set_filter_flag(file);
		else
			event_clear_no_set_filter_flag(file);
	}

	list_for_each_entry(file, &tr->events, list) {
		struct event_filter *filter;

		if (file->system != dir)
			continue;

		/* skip events the dry run rejected */
		if (event_no_set_filter_flag(file))
			continue;

		filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
		if (!filter_item)
			goto fail_mem;

		list_add_tail(&filter_item->list, &filter_list);

		filter_item->filter = __alloc_filter();
		if (!filter_item->filter)
			goto fail_mem;
		filter = filter_item->filter;

		/* Can only fail on no memory */
		err = replace_filter_string(filter, filter_string);
		if (err)
			goto fail_mem;

		err = replace_preds(file->event_call, filter, ps, false);
		if (err) {
			filter_disable(file);
			parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
			append_filter_err(ps, filter);
		} else
			event_set_filtered_flag(file);
		/*
		 * Regardless of if this returned an error, we still
		 * replace the filter for the call.
		 */
		filter = event_filter(file);
		event_set_filter(file, filter_item->filter);
		/* the list item now holds the OLD filter, freed below */
		filter_item->filter = filter;

		fail = false;
	}

	if (fail)
		goto fail;

	/*
	 * The calls can still be using the old filters.
	 * Do a synchronize_sched() to ensure all calls are
	 * done with them before we free them.
	 */
	synchronize_sched();
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		__free_filter(filter_item->filter);
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	return 0;
 fail:
	/* No call succeeded */
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
	return -EINVAL;
 fail_mem:
	/* If any call succeeded, we still need to sync */
	if (!fail)
		synchronize_sched();
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		__free_filter(filter_item->filter);
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	return -ENOMEM;
}
1842
/*
 * Allocate a filter and a parse state, then parse @filter_str into the
 * parse state's postfix program.  If @set_str, @filter_str is also
 * recorded in the filter for later error display.
 *
 * On allocation failure everything is freed and -ENOMEM returned with
 * *@psp/*@filterp untouched.  Once allocation succeeds, *@psp and
 * *@filterp are set even if parsing then fails -- the caller must
 * always finish with create_filter_finish() and owns the filter.
 */
static int create_filter_start(char *filter_str, bool set_str,
			       struct filter_parse_state **psp,
			       struct event_filter **filterp)
{
	struct event_filter *filter;
	struct filter_parse_state *ps = NULL;
	int err = 0;

	WARN_ON_ONCE(*psp || *filterp);

	/* allocate everything, and if any fails, free all and fail */
	filter = __alloc_filter();
	if (filter && set_str)
		err = replace_filter_string(filter, filter_str);

	ps = kzalloc(sizeof(*ps), GFP_KERNEL);

	/* err here can only be -ENOMEM from replace_filter_string() */
	if (!filter || !ps || err) {
		kfree(ps);
		__free_filter(filter);
		return -ENOMEM;
	}

	/* we're committed to creating a new filter */
	*filterp = filter;
	*psp = ps;

	parse_init(ps, filter_ops, filter_str);
	err = filter_parse(ps);
	if (err && set_str)
		append_filter_err(ps, filter);
	return err;
}
1876
/* Tear down a parse state made by create_filter_start(); NULL is a no-op. */
static void create_filter_finish(struct filter_parse_state *ps)
{
	if (!ps)
		return;

	filter_opstack_clear(ps);
	postfix_clear(ps);
	kfree(ps);
}
1885
1886 /**
1887 * create_filter - create a filter for a ftrace_event_call
1888 * @call: ftrace_event_call to create a filter for
1889 * @filter_str: filter string
1890 * @set_str: remember @filter_str and enable detailed error in filter
1891 * @filterp: out param for created filter (always updated on return)
1892 *
1893 * Creates a filter for @call with @filter_str. If @set_str is %true,
1894 * @filter_str is copied and recorded in the new filter.
1895 *
1896 * On success, returns 0 and *@filterp points to the new filter. On
1897 * failure, returns -errno and *@filterp may point to %NULL or to a new
1898 * filter. In the latter case, the returned filter contains error
1899 * information if @set_str is %true and the caller is responsible for
1900 * freeing it.
1901 */
1902 static int create_filter(struct ftrace_event_call *call,
1903 char *filter_str, bool set_str,
1904 struct event_filter **filterp)
1905 {
1906 struct event_filter *filter = NULL;
1907 struct filter_parse_state *ps = NULL;
1908 int err;
1909
1910 err = create_filter_start(filter_str, set_str, &ps, &filter);
1911 if (!err) {
1912 err = replace_preds(call, filter, ps, false);
1913 if (err && set_str)
1914 append_filter_err(ps, filter);
1915 }
1916 create_filter_finish(ps);
1917
1918 *filterp = filter;
1919 return err;
1920 }
1921
/* Public (non-static) wrapper so other tracing code can build event filters. */
int create_event_filter(struct ftrace_event_call *call,
			char *filter_str, bool set_str,
			struct event_filter **filterp)
{
	return create_filter(call, filter_str, set_str, filterp);
}
1928
/**
 * create_system_filter - create a filter for an event_subsystem
 * @dir: subsystem directory to create a filter for
 * @tr: trace array the subsystem belongs to
 * @filter_str: filter string
 * @filterp: out param for created filter (always updated on return)
 *
 * Identical to create_filter() except that it creates a subsystem filter
 * and always remembers @filter_str.
 */
static int create_system_filter(struct ftrace_subsystem_dir *dir,
				struct trace_array *tr,
				char *filter_str, struct event_filter **filterp)
{
	struct event_filter *filter = NULL;
	struct filter_parse_state *ps = NULL;
	int err;

	err = create_filter_start(filter_str, true, &ps, &filter);
	if (!err) {
		err = replace_system_preds(dir, tr, ps, filter_str);
		if (!err) {
			/* System filters just show a default message */
			kfree(filter->filter_string);
			filter->filter_string = NULL;
		} else {
			append_filter_err(ps, filter);
		}
	}
	create_filter_finish(ps);

	*filterp = filter;
	return err;
}
1962
/*
 * Set (or, with the string "0", clear) the filter of a single event.
 *
 * The old filter is detached first and freed only after
 * synchronize_sched(), since tracing may still be reading it.  On a
 * parse/build error the (error-carrying) new filter is still installed
 * so the error text can be read back, but the event is disabled.
 *
 * caller must hold event_mutex
 */
int apply_event_filter(struct ftrace_event_file *file, char *filter_string)
{
	struct ftrace_event_call *call = file->event_call;
	struct event_filter *filter;
	int err;

	if (!strcmp(strstrip(filter_string), "0")) {
		/* "0" means: remove the filter entirely */
		filter_disable(file);
		filter = event_filter(file);

		if (!filter)
			return 0;

		event_clear_filter(file);

		/* Make sure the filter is not being used */
		synchronize_sched();
		__free_filter(filter);

		return 0;
	}

	err = create_filter(call, filter_string, true, &filter);

	/*
	 * Always swap the call filter with the new filter
	 * even if there was an error. If there was an error
	 * in the filter, we disable the filter and show the error
	 * string
	 */
	if (filter) {
		struct event_filter *tmp;

		tmp = event_filter(file);
		if (!err)
			event_set_filtered_flag(file);
		else
			filter_disable(file);

		event_set_filter(file, filter);

		if (tmp) {
			/* Make sure the call is done with the filter */
			synchronize_sched();
			__free_filter(tmp);
		}
	}

	return err;
}
2014
/*
 * Set (or, with "0", clear) the filter for every event in a subsystem.
 * Takes event_mutex itself.  Returns 0 or a negative errno.
 */
int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
				 char *filter_string)
{
	struct event_subsystem *system = dir->subsystem;
	struct trace_array *tr = dir->tr;
	struct event_filter *filter;
	int err = 0;

	mutex_lock(&event_mutex);

	/* Make sure the system still has events */
	if (!dir->nr_events) {
		err = -ENODEV;
		goto out_unlock;
	}

	if (!strcmp(strstrip(filter_string), "0")) {
		/* drop both the per-event filters and the system filter */
		filter_free_subsystem_preds(dir, tr);
		remove_filter_string(system->filter);
		filter = system->filter;
		system->filter = NULL;
		/* Ensure all filters are no longer used */
		synchronize_sched();
		filter_free_subsystem_filters(dir, tr);
		__free_filter(filter);
		goto out_unlock;
	}

	err = create_system_filter(dir, tr, filter_string, &filter);
	if (filter) {
		/*
		 * No event actually uses the system filter
		 * we can free it without synchronize_sched().
		 */
		__free_filter(system->filter);
		system->filter = filter;
	}
out_unlock:
	mutex_unlock(&event_mutex);

	return err;
}
2057
2058 #ifdef CONFIG_PERF_EVENTS
2059
2060 void ftrace_profile_free_filter(struct perf_event *event)
2061 {
2062 struct event_filter *filter = event->filter;
2063
2064 event->filter = NULL;
2065 __free_filter(filter);
2066 }
2067
/* State threaded through the function-trace filter walk. */
struct function_filter_data {
	struct ftrace_ops *ops;	/* ops whose filter/notrace hashes are set */
	int first_filter;	/* 1 until the first filter regexp is set (reset flag) */
	int first_notrace;	/* 1 until the first notrace regexp is set (reset flag) */
};
2073
2074 #ifdef CONFIG_FUNCTION_TRACER
2075 static char **
2076 ftrace_function_filter_re(char *buf, int len, int *count)
2077 {
2078 char *str, *sep, **re;
2079
2080 str = kstrndup(buf, len, GFP_KERNEL);
2081 if (!str)
2082 return NULL;
2083
2084 /*
2085 * The argv_split function takes white space
2086 * as a separator, so convert ',' into spaces.
2087 */
2088 while ((sep = strchr(str, ',')))
2089 *sep = ' ';
2090
2091 re = argv_split(GFP_KERNEL, str, count);
2092 kfree(str);
2093 return re;
2094 }
2095
/*
 * Apply one regexp to @ops: to the filter hash when @filter is set,
 * otherwise to the notrace hash.  @reset clears the hash first.
 */
static int ftrace_function_set_regexp(struct ftrace_ops *ops, int filter,
				      int reset, char *re, int len)
{
	return filter ? ftrace_set_filter(ops, re, len, reset)
		      : ftrace_set_notrace(ops, re, len, reset);
}
2108
/*
 * Apply one 'ip' operand (which may contain several comma/space
 * separated patterns) to the filter or notrace hash of data->ops.
 * The hash is reset only by the first pattern ever applied, so later
 * predicates accumulate.  Returns 0 or a negative errno.
 */
static int __ftrace_function_set_filter(int filter, char *buf, int len,
					struct function_filter_data *data)
{
	int i, re_cnt, ret = -EINVAL;
	int *reset;
	char **re;

	reset = filter ? &data->first_filter : &data->first_notrace;

	/*
	 * The 'ip' field could have multiple filters set, separated
	 * either by space or comma. We first cut the filter and apply
	 * all pieces separately.
	 */
	re = ftrace_function_filter_re(buf, len, &re_cnt);
	if (!re)
		return -EINVAL;

	for (i = 0; i < re_cnt; i++) {
		ret = ftrace_function_set_regexp(data->ops, filter, *reset,
						 re[i], strlen(re[i]));
		if (ret)
			break;

		/* only the very first pattern resets the hash */
		if (*reset)
			*reset = 0;
	}

	argv_free(re);
	return ret;
}
2140
2141 static int ftrace_function_check_pred(struct filter_pred *pred, int leaf)
2142 {
2143 struct ftrace_event_field *field = pred->field;
2144
2145 if (leaf) {
2146 /*
2147 * Check the leaf predicate for function trace, verify:
2148 * - only '==' and '!=' is used
2149 * - the 'ip' field is used
2150 */
2151 if ((pred->op != OP_EQ) && (pred->op != OP_NE))
2152 return -EINVAL;
2153
2154 if (strcmp(field->name, "ip"))
2155 return -EINVAL;
2156 } else {
2157 /*
2158 * Check the non leaf predicate for function trace, verify:
2159 * - only '||' is used
2160 */
2161 if (pred->op != OP_OR)
2162 return -EINVAL;
2163 }
2164
2165 return 0;
2166 }
2167
/*
 * walk_pred_tree() callback: validate each node of the filter tree for
 * function tracing and, for each leaf, program its pattern into the
 * ftrace ops (OP_EQ -> filter hash, OP_NE -> notrace hash).
 */
static int ftrace_function_set_filter_cb(enum move_type move,
					 struct filter_pred *pred,
					 int *err, void *data)
{
	/* Checking the node is valid for function trace. */
	if ((move != MOVE_DOWN) ||
	    (pred->left != FILTER_PRED_INVALID)) {
		/* interior node (or revisit on the way up) */
		*err = ftrace_function_check_pred(pred, 0);
	} else {
		*err = ftrace_function_check_pred(pred, 1);
		if (*err)
			return WALK_PRED_ABORT;

		*err = __ftrace_function_set_filter(pred->op == OP_EQ,
						    pred->regex.pattern,
						    pred->regex.len,
						    data);
	}

	return (*err) ? WALK_PRED_ABORT : WALK_PRED_DEFAULT;
}
2189
/*
 * Translate an already-built event filter into ftrace filter/notrace
 * hashes on the perf event's ftrace_ops.  Returns 0 or a negative
 * errno from the walk callback.
 */
static int ftrace_function_set_filter(struct perf_event *event,
				      struct event_filter *filter)
{
	struct function_filter_data data = {
		.first_filter  = 1,
		.first_notrace = 1,
		.ops           = &event->ftrace_ops,
	};

	return walk_pred_tree(filter->preds, filter->root,
			      ftrace_function_set_filter_cb, &data);
}
2202 #else
/* Stub when the function tracer is not configured. */
static int ftrace_function_set_filter(struct perf_event *event,
				      struct event_filter *filter)
{
	return -ENODEV;
}
2208 #endif /* CONFIG_FUNCTION_TRACER */
2209
/*
 * Attach a filter to a perf tracepoint event.
 *
 * For ordinary events, ownership of the built filter moves to
 * event->filter.  For the function-trace pseudo event the filter is
 * only translated into ftrace hashes and is always freed here, as it
 * is on any error.  Returns 0 or a negative errno.
 */
int ftrace_profile_set_filter(struct perf_event *event, int event_id,
			      char *filter_str)
{
	int err;
	struct event_filter *filter;
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);

	call = event->tp_event;

	err = -EINVAL;
	if (!call)
		goto out_unlock;

	/* refuse to overwrite an existing filter */
	err = -EEXIST;
	if (event->filter)
		goto out_unlock;

	err = create_filter(call, filter_str, false, &filter);
	if (err)
		goto free_filter;

	if (ftrace_event_is_function(call))
		err = ftrace_function_set_filter(event, filter);
	else
		event->filter = filter;

free_filter:
	/* the function path (and any error path) does not keep the filter */
	if (err || ftrace_event_is_function(call))
		__free_filter(filter);

out_unlock:
	mutex_unlock(&event_mutex);

	return err;
}
2247
2248 #endif /* CONFIG_PERF_EVENTS */
2249
2250 #ifdef CONFIG_FTRACE_STARTUP_TEST
2251
2252 #include <linux/types.h>
2253 #include <linux/tracepoint.h>
2254
2255 #define CREATE_TRACE_POINTS
2256 #include "trace_events_filter_test.h"
2257
/*
 * Self-test table: each entry pairs a filter string with a record,
 * the expected match result, and the set of fields whose leaf preds
 * must NOT be evaluated (verifying short-circuit/folded evaluation).
 */
#define DATA_REC(m, va, vb, vc, vd, ve, vf, vg, vh, nvisit) \
{ \
	.filter = FILTER, \
	.rec    = { .a = va, .b = vb, .c = vc, .d = vd, \
		    .e = ve, .f = vf, .g = vg, .h = vh }, \
	.match  = m, \
	.not_visited = nvisit, \
}
#define YES	1
#define NO	0

static struct test_filter_data_t {
	char *filter;				/* filter expression under test */
	struct ftrace_raw_ftrace_test_filter rec;	/* synthetic event record */
	int match;				/* expected filter_match_preds() result */
	char *not_visited;			/* field letters that must not be visited */
} test_filter_data[] = {
#define FILTER "a == 1 && b == 1 && c == 1 && d == 1 && " \
	       "e == 1 && f == 1 && g == 1 && h == 1"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, ""),
	DATA_REC(NO, 0, 1, 1, 1, 1, 1, 1, 1, "bcdefgh"),
	DATA_REC(NO, 1, 1, 1, 1, 1, 1, 1, 0, ""),
#undef FILTER
#define FILTER "a == 1 || b == 1 || c == 1 || d == 1 || " \
	       "e == 1 || f == 1 || g == 1 || h == 1"
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""),
	DATA_REC(YES, 0, 0, 0, 0, 0, 0, 0, 1, ""),
	DATA_REC(YES, 1, 0, 0, 0, 0, 0, 0, 0, "bcdefgh"),
#undef FILTER
#define FILTER "(a == 1 || b == 1) && (c == 1 || d == 1) && " \
	       "(e == 1 || f == 1) && (g == 1 || h == 1)"
	DATA_REC(NO, 0, 0, 1, 1, 1, 1, 1, 1, "dfh"),
	DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(YES, 1, 0, 1, 0, 0, 1, 0, 1, "bd"),
	DATA_REC(NO, 1, 0, 1, 0, 0, 1, 0, 0, "bd"),
#undef FILTER
#define FILTER "(a == 1 && b == 1) || (c == 1 && d == 1) || " \
	       "(e == 1 && f == 1) || (g == 1 && h == 1)"
	DATA_REC(YES, 1, 0, 1, 1, 1, 1, 1, 1, "efgh"),
	DATA_REC(YES, 0, 0, 0, 0, 0, 0, 1, 1, ""),
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""),
#undef FILTER
#define FILTER "(a == 1 && b == 1) && (c == 1 && d == 1) && " \
	       "(e == 1 && f == 1) || (g == 1 && h == 1)"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 0, "gh"),
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""),
	DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, ""),
#undef FILTER
#define FILTER "((a == 1 || b == 1) || (c == 1 || d == 1) || " \
	       "(e == 1 || f == 1)) && (g == 1 || h == 1)"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 1, "bcdef"),
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""),
	DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, "h"),
#undef FILTER
#define FILTER "((((((((a == 1) && (b == 1)) || (c == 1)) && (d == 1)) || " \
	       "(e == 1)) && (f == 1)) || (g == 1)) && (h == 1))"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "ceg"),
	DATA_REC(NO, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(NO, 1, 0, 1, 0, 1, 0, 1, 0, ""),
#undef FILTER
#define FILTER "((((((((a == 1) || (b == 1)) && (c == 1)) || (d == 1)) && " \
	       "(e == 1)) || (f == 1)) && (g == 1)) || (h == 1))"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "bdfh"),
	DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(YES, 1, 0, 1, 0, 1, 0, 1, 0, "bdfh"),
};

#undef DATA_REC
#undef FILTER
#undef YES
#undef NO

/* number of test cases above */
#define DATA_CNT (sizeof(test_filter_data)/sizeof(struct test_filter_data_t))

/* set by test_pred_visited_fn() when a supposedly-skipped pred runs */
static int test_pred_visited;
2333
2334 static int test_pred_visited_fn(struct filter_pred *pred, void *event)
2335 {
2336 struct ftrace_event_field *field = pred->field;
2337
2338 test_pred_visited = 1;
2339 printk(KERN_INFO "\npred visited %s\n", field->name);
2340 return 1;
2341 }
2342
/*
 * walk_pred_tree() callback: for every leaf whose field letter appears
 * in the "not visited" string, swap in test_pred_visited_fn so the
 * self-test can detect if short-circuiting failed to skip it.
 */
static int test_walk_pred_cb(enum move_type move, struct filter_pred *pred,
			     int *err, void *data)
{
	char *fields = data;

	if ((move == MOVE_DOWN) &&
	    (pred->left == FILTER_PRED_INVALID)) {
		struct ftrace_event_field *field = pred->field;

		if (!field) {
			WARN(1, "all leafs should have field defined");
			return WALK_PRED_DEFAULT;
		}
		/* fields are single letters 'a'..'h', so first char suffices */
		if (!strchr(fields, *field->name))
			return WALK_PRED_DEFAULT;

		WARN_ON(!pred->fn);
		pred->fn = test_pred_visited_fn;
	}
	return WALK_PRED_DEFAULT;
}
2364
/*
 * Boot-time self-test: for each table entry, build the filter, plant
 * trap functions on the preds that must be skipped, run the record
 * through filter_match_preds(), and check both the match result and
 * that no trapped pred fired.  Stops at the first failure.
 */
static __init int ftrace_test_event_filter(void)
{
	int i;

	printk(KERN_INFO "Testing ftrace filter: ");

	for (i = 0; i < DATA_CNT; i++) {
		struct event_filter *filter = NULL;
		struct test_filter_data_t *d = &test_filter_data[i];
		int err;

		err = create_filter(&event_ftrace_test_filter, d->filter,
				    false, &filter);
		if (err) {
			printk(KERN_INFO
			       "Failed to get filter for '%s', err %d\n",
			       d->filter, err);
			/* create_filter() may still hand back a filter */
			__free_filter(filter);
			break;
		}

		/*
		 * The preemption disabling is not really needed for self
		 * tests, but the rcu dereference will complain without it.
		 */
		preempt_disable();
		if (*d->not_visited)
			walk_pred_tree(filter->preds, filter->root,
				       test_walk_pred_cb,
				       d->not_visited);

		test_pred_visited = 0;
		err = filter_match_preds(filter, &d->rec);
		preempt_enable();

		__free_filter(filter);

		if (test_pred_visited) {
			printk(KERN_INFO
			       "Failed, unwanted pred visited for filter %s\n",
			       d->filter);
			break;
		}

		if (err != d->match) {
			printk(KERN_INFO
			       "Failed to match filter '%s', expected %d\n",
			       d->filter, d->match);
			break;
		}
	}

	/* only reached without break when every case passed */
	if (i == DATA_CNT)
		printk(KERN_CONT "OK\n");

	return 0;
}
2422
2423 late_initcall(ftrace_test_event_filter);
2424
2425 #endif /* CONFIG_FTRACE_STARTUP_TEST */