]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - kernel/trace/trace_events_filter.c
6a642e2782411de6f8d891d4dde7f8df136d21e6
[mirror_ubuntu-bionic-kernel.git] / kernel / trace / trace_events_filter.c
1 /*
2 * trace_events_filter - generic event filtering
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
19 */
20
21 #include <linux/module.h>
22 #include <linux/ctype.h>
23 #include <linux/mutex.h>
24 #include <linux/perf_event.h>
25 #include <linux/slab.h>
26
27 #include "trace.h"
28 #include "trace_output.h"
29
/* Identifiers for every operator the filter language understands. */
enum filter_op_ids
{
	OP_OR,		/* "||" */
	OP_AND,		/* "&&" */
	OP_GLOB,	/* "~" : glob (wildcard) string match */
	OP_NE,		/* "!=" */
	OP_EQ,		/* "==" */
	OP_LT,		/* "<" */
	OP_LE,		/* "<=" */
	OP_GT,		/* ">" */
	OP_GE,		/* ">=" */
	OP_NONE,	/* sentinel: "no operator" */
	OP_OPEN_PAREN,	/* "(" : only ever lives on the operator stack */
};

/* One entry of the operator table: id, its source text and its precedence. */
struct filter_op {
	int id;
	char *string;
	int precedence;
};

/*
 * Operator table used by the infix parser.  Higher precedence binds
 * tighter; the table is terminated by the OP_NONE entry, whose string
 * "OP_NONE" is used as the end-of-table sentinel by is_op_char() and
 * infix_get_op().
 */
static struct filter_op filter_ops[] = {
	{ OP_OR,	"||",		1 },
	{ OP_AND,	"&&",		2 },
	{ OP_GLOB,	"~",		4 },
	{ OP_NE,	"!=",		4 },
	{ OP_EQ,	"==",		4 },
	{ OP_LT,	"<",		5 },
	{ OP_LE,	"<=",		5 },
	{ OP_GT,	">",		5 },
	{ OP_GE,	">=",		5 },
	{ OP_NONE,	"OP_NONE",	0 },
	{ OP_OPEN_PAREN, "(",		0 },
};
64
/* Parse/setup error codes; each indexes the matching string in err_text[]. */
enum {
	FILT_ERR_NONE,
	FILT_ERR_INVALID_OP,
	FILT_ERR_UNBALANCED_PAREN,
	FILT_ERR_TOO_MANY_OPERANDS,
	FILT_ERR_OPERAND_TOO_LONG,
	FILT_ERR_FIELD_NOT_FOUND,
	FILT_ERR_ILLEGAL_FIELD_OP,
	FILT_ERR_ILLEGAL_INTVAL,
	FILT_ERR_BAD_SUBSYS_FILTER,
	FILT_ERR_TOO_MANY_PREDS,
	FILT_ERR_MISSING_FIELD,
	FILT_ERR_INVALID_FILTER,
};

/*
 * Human readable messages, reported back to the user through the
 * filter file by append_filter_err().  Must stay in sync with the
 * FILT_ERR_* enum above (same order, same count).
 */
static char *err_text[] = {
	"No error",
	"Invalid operator",
	"Unbalanced parens",
	"Too many operands",
	"Operand too long",
	"Field not found",
	"Illegal operation for field type",
	"Illegal integer value",
	"Couldn't find or set field in one of a subsystem's events",
	"Too many terms in predicate expression",
	"Missing field name and/or value",
	"Meaningless filter expression",
};
94
/* A pending operator on the parser's operator stack (LIFO via ->list). */
struct opstack_op {
	int op;				/* one of enum filter_op_ids */
	struct list_head list;
};

/*
 * One element of the postfix (RPN) output list: either an operator
 * (op != OP_NONE, operand == NULL) or an operand string
 * (op == OP_NONE, operand is a kstrdup'd copy).
 */
struct postfix_elt {
	int op;
	char *operand;
	struct list_head list;
};

/* All state for one infix -> postfix parse of a filter string. */
struct filter_parse_state {
	struct filter_op *ops;		/* operator table (filter_ops) */
	struct list_head opstack;	/* shunting-yard operator stack */
	struct list_head postfix;	/* resulting postfix expression */
	int lasterr;			/* last FILT_ERR_* seen */
	int lasterr_pos;		/* position of that error in the input */

	struct {
		char *string;		/* the raw infix input (not owned) */
		unsigned int cnt;	/* characters remaining to consume */
		unsigned int tail;	/* next read position in string */
	} infix;

	struct {
		char string[MAX_FILTER_STR_VAL];	/* operand accumulator */
		int pos;
		unsigned int tail;	/* next write position in string */
	} operand;
};

/*
 * Stack of predicates used while converting the postfix list into a
 * binary tree; grows downward from index == n_preds toward 0.
 */
struct pred_stack {
	struct filter_pred **preds;
	int index;
};
130
/*
 * Generate filter_pred_<type>(): an ordering predicate (<, <=, >, >=)
 * on a <type>-sized field read straight out of the event record at
 * pred->offset, compared against the parsed constant pred->val.
 * Unknown ops fall through and report "no match".
 */
#define DEFINE_COMPARISON_PRED(type)					\
static int filter_pred_##type(struct filter_pred *pred, void *event)	\
{									\
	type *addr = (type *)(event + pred->offset);			\
	type val = (type)pred->val;					\
	int match = 0;							\
									\
	switch (pred->op) {						\
	case OP_LT:							\
		match = (*addr < val);					\
		break;							\
	case OP_LE:							\
		match = (*addr <= val);					\
		break;							\
	case OP_GT:							\
		match = (*addr > val);					\
		break;							\
	case OP_GE:							\
		match = (*addr >= val);					\
		break;							\
	default:							\
		break;							\
	}								\
									\
	return match;							\
}

/*
 * Generate filter_pred_<size>(): an equality predicate on a <size>-bit
 * unsigned field; pred->not flips the result to implement "!=".
 * Signedness is irrelevant for equality, so one function serves both.
 */
#define DEFINE_EQUALITY_PRED(size)					\
static int filter_pred_##size(struct filter_pred *pred, void *event)	\
{									\
	u##size *addr = (u##size *)(event + pred->offset);		\
	u##size val = (u##size)pred->val;				\
	int match;							\
									\
	match = (val == *addr) ^ pred->not;				\
									\
	return match;							\
}

/* Ordering predicates: one per signed/unsigned integer width. */
DEFINE_COMPARISON_PRED(s64);
DEFINE_COMPARISON_PRED(u64);
DEFINE_COMPARISON_PRED(s32);
DEFINE_COMPARISON_PRED(u32);
DEFINE_COMPARISON_PRED(s16);
DEFINE_COMPARISON_PRED(u16);
DEFINE_COMPARISON_PRED(s8);
DEFINE_COMPARISON_PRED(u8);

/* Equality predicates: one per integer width, signedness agnostic. */
DEFINE_EQUALITY_PRED(64);
DEFINE_EQUALITY_PRED(32);
DEFINE_EQUALITY_PRED(16);
DEFINE_EQUALITY_PRED(8);
183
/* Filter predicate for fixed sized arrays of characters */
static int filter_pred_string(struct filter_pred *pred, void *event)
{
	char *addr = (char *)(event + pred->offset);
	int cmp, match;

	/* The string is stored inline; field_len bounds the comparison. */
	cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len);

	/* pred->not inverts the result for "!=" predicates. */
	match = cmp ^ pred->not;

	return match;
}
196
/* Filter predicate for char * pointers */
static int filter_pred_pchar(struct filter_pred *pred, void *event)
{
	char **addr = (char **)(event + pred->offset);
	int cmp, match;
	int len = strlen(*addr) + 1;	/* including trailing '\0' */

	/*
	 * NOTE(review): this assumes *addr is a valid, NUL-terminated
	 * pointer; a NULL here would oops in strlen() - confirm that
	 * events carrying FILTER_PTR_STRING fields guarantee this.
	 */
	cmp = pred->regex.match(*addr, &pred->regex, len);

	match = cmp ^ pred->not;

	return match;
}
210
/*
 * Filter predicate for dynamic sized arrays of characters.
 * These are implemented through a list of strings at the end
 * of the entry.
 * Also each of these strings have a field in the entry which
 * contains its offset from the beginning of the entry.
 * We have then first to get this field, dereference it
 * and add it to the address of the entry, and at last we have
 * the address of the string.
 */
static int filter_pred_strloc(struct filter_pred *pred, void *event)
{
	/* The u32 at pred->offset packs <length:16 | offset:16>. */
	u32 str_item = *(u32 *)(event + pred->offset);
	int str_loc = str_item & 0xffff;
	int str_len = str_item >> 16;
	char *addr = (char *)(event + str_loc);
	int cmp, match;

	cmp = pred->regex.match(addr, &pred->regex, str_len);

	/* pred->not inverts the result for "!=" predicates. */
	match = cmp ^ pred->not;

	return match;
}
235
/* Default predicate used for unset slots: never matches anything. */
static int filter_pred_none(struct filter_pred *pred, void *event)
{
	return 0;
}
240
241 /*
242 * regex_match_foo - Basic regex callbacks
243 *
244 * @str: the string to be searched
245 * @r: the regex structure containing the pattern string
246 * @len: the length of the string to be searched (including '\0')
247 *
248 * Note:
249 * - @str might not be NULL-terminated if it's of type DYN_STRING
250 * or STATIC_STRING
251 */
252
/* Full match: pattern must equal the searched string. */
static int regex_match_full(char *str, struct regex *r, int len)
{
	/* strncmp() never reads past @len, so unterminated @str is safe. */
	if (strncmp(str, r->pattern, len) == 0)
		return 1;
	return 0;
}
259
260 static int regex_match_front(char *str, struct regex *r, int len)
261 {
262 if (strncmp(str, r->pattern, r->len) == 0)
263 return 1;
264 return 0;
265 }
266
/* Middle match ("*pat*"): pattern may appear anywhere in the first @len bytes. */
static int regex_match_middle(char *str, struct regex *r, int len)
{
	/* strnstr() bounds the search to @len, so no over-read. */
	if (strnstr(str, r->pattern, len))
		return 1;
	return 0;
}
273
/* End match ("*pat"): pattern must be a suffix of the searched string. */
static int regex_match_end(char *str, struct regex *r, int len)
{
	/* @len counts the trailing '\0'; drop it to get the text length. */
	int strlen = len - 1;

	if (strlen >= r->len &&
	    memcmp(str + strlen - r->len, r->pattern, r->len) == 0)
		return 1;
	return 0;
}
283
/**
 * filter_parse_regex - parse a basic regex
 * @buff: the raw regex
 * @len: length of the regex
 * @search: will point to the beginning of the string to compare
 * @not: tell whether the match will have to be inverted
 *
 * This passes in a buffer containing a regex and this function will
 * set search to point to the search part of the buffer and
 * return the type of search it is (see enum above).
 * This does modify buff.
 *
 * Returns enum type.
 * search returns the pointer to use for comparison.
 * not returns 1 if buff started with a '!'
 * 0 otherwise.
 */
enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
{
	int type = MATCH_FULL;
	int i;

	/* A leading '!' inverts the whole match and is stripped here. */
	if (buff[0] == '!') {
		*not = 1;
		buff++;
		len--;
	} else
		*not = 0;

	*search = buff;

	/*
	 * Only the first '*' decides the type:
	 *   "*pat"  -> MATCH_END_ONLY    (search starts after the '*')
	 *   "*pat*" -> MATCH_MIDDLE_ONLY
	 *   "pat*"  -> MATCH_FRONT_ONLY  (the '*' is overwritten with '\0')
	 * anything without '*' stays MATCH_FULL.
	 */
	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				*search = buff + 1;
				type = MATCH_END_ONLY;
			} else {
				if (type == MATCH_END_ONLY)
					type = MATCH_MIDDLE_ONLY;
				else
					type = MATCH_FRONT_ONLY;
				buff[i] = 0;
				break;
			}
		}
	}

	return type;
}
333
/*
 * Pick the regex_match_* callback for @pred based on its pattern.
 * Only OP_GLOB patterns are parsed for wildcards; all other string
 * ops use a full comparison.  A leading '!' in a glob folds into
 * pred->not.
 */
static void filter_build_regex(struct filter_pred *pred)
{
	struct regex *r = &pred->regex;
	char *search;
	enum regex_type type = MATCH_FULL;
	int not = 0;

	if (pred->op == OP_GLOB) {
		type = filter_parse_regex(r->pattern, r->len, &search, &not);
		/* Compact the pattern in place: drop '!' and '*' markers. */
		r->len = strlen(search);
		memmove(r->pattern, search, r->len+1);
	}

	switch (type) {
	case MATCH_FULL:
		r->match = regex_match_full;
		break;
	case MATCH_FRONT_ONLY:
		r->match = regex_match_front;
		break;
	case MATCH_MIDDLE_ONLY:
		r->match = regex_match_middle;
		break;
	case MATCH_END_ONLY:
		r->match = regex_match_end;
		break;
	}

	/* '!' in the pattern toggles any inversion already requested. */
	pred->not ^= not;
}
364
/* Direction of the current step in an iterative pred-tree walk. */
enum move_type {
	MOVE_DOWN,		/* descending into a child */
	MOVE_UP_FROM_LEFT,	/* returning from a left child */
	MOVE_UP_FROM_RIGHT	/* returning from a right child */
};

/*
 * Step from @pred to its parent within the @preds array, recording in
 * @move which child we came back from.  The FILTER_PRED_IS_RIGHT bit
 * of ->parent encodes "this node is a right child"; the remaining bits
 * are the parent's array index.  @index is currently unused.
 */
static struct filter_pred *
get_pred_parent(struct filter_pred *pred, struct filter_pred *preds,
		int index, enum move_type *move)
{
	if (pred->parent & FILTER_PRED_IS_RIGHT)
		*move = MOVE_UP_FROM_RIGHT;
	else
		*move = MOVE_UP_FROM_LEFT;
	pred = &preds[pred->parent & ~FILTER_PRED_IS_RIGHT];

	return pred;
}
383
/* What a walk callback may ask the tree walker to do next. */
enum walk_return {
	WALK_PRED_ABORT,	/* stop the walk, return callback's *err */
	WALK_PRED_PARENT,	/* skip this subtree, climb to the parent */
	WALK_PRED_DEFAULT,	/* continue the normal traversal order */
};

typedef int (*filter_pred_walkcb_t) (enum move_type move,
				     struct filter_pred *pred,
				     int *err, void *data);

/*
 * Iterative depth-first traversal of the predicate tree rooted at
 * @root.  The callback @cb is invoked on every visit of a node -
 * going down, and again when climbing back from either child - and
 * can abort or prune via its walk_return value.  No recursion is
 * used, so stack depth is constant regardless of tree size.
 *
 * Returns 0 on a complete walk, -EINVAL if @preds is NULL, or the
 * error the callback stored when it aborted.
 */
static int walk_pred_tree(struct filter_pred *preds,
			  struct filter_pred *root,
			  filter_pred_walkcb_t cb, void *data)
{
	struct filter_pred *pred = root;
	enum move_type move = MOVE_DOWN;
	int done = 0;

	if  (!preds)
		return -EINVAL;

	do {
		int err = 0, ret;

		ret = cb(move, pred, &err, data);
		if (ret == WALK_PRED_ABORT)
			return err;
		if (ret == WALK_PRED_PARENT)
			goto get_parent;

		switch (move) {
		case MOVE_DOWN:
			/* Leaves have ->left == FILTER_PRED_INVALID. */
			if (pred->left != FILTER_PRED_INVALID) {
				pred = &preds[pred->left];
				continue;
			}
			goto get_parent;
		case MOVE_UP_FROM_LEFT:
			/* Left side done; descend into the right child. */
			pred = &preds[pred->right];
			move = MOVE_DOWN;
			continue;
		case MOVE_UP_FROM_RIGHT:
 get_parent:
			/* Both sides of the root done: walk complete. */
			if (pred == root)
				break;
			pred = get_pred_parent(pred, preds,
					       pred->parent,
					       &move);
			continue;
		}
		done = 1;
	} while (!done);

	/* We are fine. */
	return 0;
}
440
/*
 * A series of AND or ORs where found together. Instead of
 * climbing up and down the tree branches, an array of the
 * ops were made in order of checks. We can just move across
 * the array and short circuit if needed.
 *
 * Returns the boolean result of evaluating the folded @op node
 * against the event record @rec.
 */
static int process_ops(struct filter_pred *preds,
		       struct filter_pred *op, void *rec)
{
	struct filter_pred *pred;
	int match = 0;
	int type;
	int i;

	/*
	 * Micro-optimization: We set type to true if op
	 * is an OR and false otherwise (AND). Then we
	 * just need to test if the match is equal to
	 * the type, and if it is, we can short circuit the
	 * rest of the checks:
	 *
	 *  if ((match && op->op == OP_OR) ||
	 *      (!match && op->op == OP_AND))
	 *	  return match;
	 */
	type = op->op == OP_OR;

	/* For folded nodes, op->val holds the count of entries in op->ops. */
	for (i = 0; i < op->val; i++) {
		pred = &preds[op->ops[i]];
		if (!WARN_ON_ONCE(!pred->fn))
			match = pred->fn(pred, rec);
		if (!!match == type)
			return match;
	}
	return match;
}
477
/* Per-walk context for filter_match_preds(). */
struct filter_match_preds_data {
	struct filter_pred *preds;	/* flat predicate array */
	int match;			/* running boolean result */
	void *rec;			/* the event record being filtered */
};

/*
 * walk_pred_tree() callback that evaluates the predicate tree against
 * d->rec, leaving the verdict in d->match.  Uses WALK_PRED_PARENT to
 * short-circuit subtrees whose result is already decided.
 */
static int filter_match_preds_cb(enum move_type move, struct filter_pred *pred,
				 int *err, void *data)
{
	struct filter_match_preds_data *d = data;

	*err = 0;
	switch (move) {
	case MOVE_DOWN:
		/* only AND and OR have children */
		if (pred->left != FILTER_PRED_INVALID) {
			/* If ops is set, then it was folded. */
			if (!pred->ops)
				return WALK_PRED_DEFAULT;
			/* We can treat folded ops as a leaf node */
			d->match = process_ops(d->preds, pred, d->rec);
		} else {
			if (!WARN_ON_ONCE(!pred->fn))
				d->match = pred->fn(pred, d->rec);
		}

		return WALK_PRED_PARENT;
	case MOVE_UP_FROM_LEFT:
		/*
		 * Check for short circuits.
		 *
		 * Optimization: !!match == (pred->op == OP_OR)
		 *   is the same as:
		 * if ((match && pred->op == OP_OR) ||
		 *     (!match && pred->op == OP_AND))
		 */
		if (!!d->match == (pred->op == OP_OR))
			return WALK_PRED_PARENT;
		break;
	case MOVE_UP_FROM_RIGHT:
		break;
	}

	return WALK_PRED_DEFAULT;
}
523
/* return 1 if event matches, 0 otherwise (discard) */
int filter_match_preds(struct event_filter *filter, void *rec)
{
	struct filter_pred *preds;
	struct filter_pred *root;
	struct filter_match_preds_data data = {
		/* match is currently meaningless */
		.match = -1,
		.rec   = rec,
	};
	int n_preds, ret;

	/* no filter is considered a match */
	if (!filter)
		return 1;

	n_preds = filter->n_preds;
	if (!n_preds)
		return 1;

	/*
	 * n_preds, root and filter->preds are protect with preemption disabled.
	 */
	root = rcu_dereference_sched(filter->root);
	if (!root)
		return 1;

	data.preds = preds = rcu_dereference_sched(filter->preds);
	ret = walk_pred_tree(preds, root, filter_match_preds_cb, &data);
	/* The walk only fails on a NULL preds array, which we checked. */
	WARN_ON(ret);
	return data.match;
}
EXPORT_SYMBOL_GPL(filter_match_preds);
557
/* Record the most recent parse error and its input position for reporting. */
static void parse_error(struct filter_parse_state *ps, int err, int pos)
{
	ps->lasterr = err;
	ps->lasterr_pos = pos;
}
563
564 static void remove_filter_string(struct event_filter *filter)
565 {
566 if (!filter)
567 return;
568
569 kfree(filter->filter_string);
570 filter->filter_string = NULL;
571 }
572
/*
 * Replace the filter's saved textual representation with a copy of
 * @filter_string.  Returns 0, or -ENOMEM (leaving the string NULL).
 */
static int replace_filter_string(struct event_filter *filter,
				 char *filter_string)
{
	kfree(filter->filter_string);
	filter->filter_string = kstrdup(filter_string, GFP_KERNEL);
	if (!filter->filter_string)
		return -ENOMEM;

	return 0;
}
583
584 static int append_filter_string(struct event_filter *filter,
585 char *string)
586 {
587 int newlen;
588 char *new_filter_string;
589
590 BUG_ON(!filter->filter_string);
591 newlen = strlen(filter->filter_string) + strlen(string) + 1;
592 new_filter_string = kmalloc(newlen, GFP_KERNEL);
593 if (!new_filter_string)
594 return -ENOMEM;
595
596 strcpy(new_filter_string, filter->filter_string);
597 strcat(new_filter_string, string);
598 kfree(filter->filter_string);
599 filter->filter_string = new_filter_string;
600
601 return 0;
602 }
603
/*
 * Append a two-line annotation to the filter string showing where
 * parsing failed:
 *
 *	<spaces>^
 *	parse_error: <message>
 *
 * so that reading the filter file points at the offending position.
 */
static void append_filter_err(struct filter_parse_state *ps,
			      struct event_filter *filter)
{
	int pos = ps->lasterr_pos;
	char *buf, *pbuf;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return;

	/* NOTE(review): append_filter_string() ENOMEM is silently ignored. */
	append_filter_string(filter, "\n");
	memset(buf, ' ', PAGE_SIZE);
	/* Leave headroom at the end of the page for the error text. */
	if (pos > PAGE_SIZE - 128)
		pos = 0;
	buf[pos] = '^';
	pbuf = &buf[pos] + 1;

	sprintf(pbuf, "\nparse_error: %s\n", err_text[ps->lasterr]);
	append_filter_string(filter, buf);
	free_page((unsigned long) buf);
}
625
/* Print an event's current filter string (or "none") into @s. */
void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
{
	struct event_filter *filter;

	/* event_mutex keeps call->filter stable while we read it. */
	mutex_lock(&event_mutex);
	filter = call->filter;
	if (filter && filter->filter_string)
		trace_seq_printf(s, "%s\n", filter->filter_string);
	else
		trace_seq_printf(s, "none\n");
	mutex_unlock(&event_mutex);
}
638
639 void print_subsystem_event_filter(struct event_subsystem *system,
640 struct trace_seq *s)
641 {
642 struct event_filter *filter;
643
644 mutex_lock(&event_mutex);
645 filter = system->filter;
646 if (filter && filter->filter_string)
647 trace_seq_printf(s, "%s\n", filter->filter_string);
648 else
649 trace_seq_printf(s, "none\n");
650 mutex_unlock(&event_mutex);
651 }
652
653 static struct ftrace_event_field *
654 __find_event_field(struct list_head *head, char *name)
655 {
656 struct ftrace_event_field *field;
657
658 list_for_each_entry(field, head, link) {
659 if (!strcmp(field->name, name))
660 return field;
661 }
662
663 return NULL;
664 }
665
/*
 * Look up @name first among the fields common to all events, then
 * among @call's own fields.  Returns NULL if neither list has it.
 */
static struct ftrace_event_field *
find_event_field(struct ftrace_event_call *call, char *name)
{
	struct ftrace_event_field *field;
	struct list_head *head;

	field = __find_event_field(&ftrace_common_fields, name);
	if (field)
		return field;

	head = trace_get_fields(call);
	return __find_event_field(head, name);
}
679
680 static int __alloc_pred_stack(struct pred_stack *stack, int n_preds)
681 {
682 stack->preds = kzalloc(sizeof(*stack->preds)*(n_preds + 1), GFP_KERNEL);
683 if (!stack->preds)
684 return -ENOMEM;
685 stack->index = n_preds;
686 return 0;
687 }
688
689 static void __free_pred_stack(struct pred_stack *stack)
690 {
691 kfree(stack->preds);
692 stack->index = 0;
693 }
694
/*
 * Push @pred onto the stack (which grows downward from n_preds).
 * Returns 0, or -ENOSPC with a WARN if the stack is already full -
 * that would indicate a malformed postfix expression.
 */
static int __push_pred_stack(struct pred_stack *stack,
			     struct filter_pred *pred)
{
	int index = stack->index;

	if (WARN_ON(index == 0))
		return -ENOSPC;

	stack->preds[--index] = pred;
	stack->index = index;
	return 0;
}
707
/*
 * Pop the top predicate, or return NULL when the stack is empty.
 * Relies on the extra NULL slot allocated by __alloc_pred_stack()
 * acting as the empty-stack sentinel.
 */
static struct filter_pred *
__pop_pred_stack(struct pred_stack *stack)
{
	struct filter_pred *pred;
	int index = stack->index;

	pred = stack->preds[index++];
	if (!pred)
		return NULL;

	stack->index = index;
	return pred;
}
721
/*
 * Copy @src into slot @idx of the filter's predicate array and wire it
 * into the tree being built from the postfix expression: logical ops
 * pop their two children off @stack, leaves mark themselves as such,
 * and the finished node is pushed back.  The FILTER_PRED_FOLD bit on
 * ->index marks nodes whose subtree can later be flattened into an
 * ops[] array (see fold_pred_tree).
 *
 * Returns 0, -EINVAL on a malformed expression (missing children),
 * or -ENOSPC from the push.
 */
static int filter_set_pred(struct event_filter *filter,
			   int idx,
			   struct pred_stack *stack,
			   struct filter_pred *src)
{
	struct filter_pred *dest = &filter->preds[idx];
	struct filter_pred *left;
	struct filter_pred *right;

	*dest = *src;
	dest->index = idx;

	if (dest->op == OP_OR || dest->op == OP_AND) {
		right = __pop_pred_stack(stack);
		left = __pop_pred_stack(stack);
		if (!left || !right)
			return -EINVAL;
		/*
		 * If both children can be folded
		 * and they are the same op as this op or a leaf,
		 * then this op can be folded.
		 */
		if (left->index & FILTER_PRED_FOLD &&
		    (left->op == dest->op ||
		     left->left == FILTER_PRED_INVALID) &&
		    right->index & FILTER_PRED_FOLD &&
		    (right->op == dest->op ||
		     right->left == FILTER_PRED_INVALID))
			dest->index |= FILTER_PRED_FOLD;

		/* Strip the fold bit: child links hold plain indices. */
		dest->left = left->index & ~FILTER_PRED_FOLD;
		dest->right = right->index & ~FILTER_PRED_FOLD;
		/* The IS_RIGHT bit on ->parent records which child we were. */
		left->parent = dest->index & ~FILTER_PRED_FOLD;
		right->parent = dest->index | FILTER_PRED_IS_RIGHT;
	} else {
		/*
		 * Make dest->left invalid to be used as a quick
		 * way to know this is a leaf node.
		 */
		dest->left = FILTER_PRED_INVALID;

		/* All leafs allow folding the parent ops. */
		dest->index |= FILTER_PRED_FOLD;
	}

	return __push_pred_stack(stack, dest);
}
769
770 static void __free_preds(struct event_filter *filter)
771 {
772 if (filter->preds) {
773 kfree(filter->preds);
774 filter->preds = NULL;
775 }
776 filter->a_preds = 0;
777 filter->n_preds = 0;
778 }
779
/* Stop the event from applying its filter (clears the FILTERED flag). */
static void filter_disable(struct ftrace_event_call *call)
{
	call->flags &= ~TRACE_EVENT_FL_FILTERED;
}
784
/* Free a filter, its predicate array and its saved string; NULL is OK. */
static void __free_filter(struct event_filter *filter)
{
	if (!filter)
		return;

	__free_preds(filter);
	kfree(filter->filter_string);
	kfree(filter);
}
794
/*
 * Called when destroying the ftrace_event_call.
 * The call is being freed, so we do not need to worry about
 * the call being currently used. This is for module code removing
 * the tracepoints from within it.
 */
void destroy_preds(struct ftrace_event_call *call)
{
	__free_filter(call->filter);
	/* Clear the pointer so nothing can reach the freed filter. */
	call->filter = NULL;
}
806
807 static struct event_filter *__alloc_filter(void)
808 {
809 struct event_filter *filter;
810
811 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
812 return filter;
813 }
814
815 static int __alloc_preds(struct event_filter *filter, int n_preds)
816 {
817 struct filter_pred *pred;
818 int i;
819
820 if (filter->preds)
821 __free_preds(filter);
822
823 filter->preds =
824 kzalloc(sizeof(*filter->preds) * n_preds, GFP_KERNEL);
825
826 if (!filter->preds)
827 return -ENOMEM;
828
829 filter->a_preds = n_preds;
830 filter->n_preds = 0;
831
832 for (i = 0; i < n_preds; i++) {
833 pred = &filter->preds[i];
834 pred->fn = filter_pred_none;
835 }
836
837 return 0;
838 }
839
/*
 * Disable filtering and drop the saved filter string for every event
 * belonging to @system.  The filters themselves are not freed here
 * (see filter_free_subsystem_filters()).
 */
static void filter_free_subsystem_preds(struct event_subsystem *system)
{
	struct ftrace_event_call *call;

	list_for_each_entry(call, &ftrace_events, list) {
		if (strcmp(call->class->system, system->name) != 0)
			continue;

		filter_disable(call);
		remove_filter_string(call->filter);
	}
}
852
/* Free the filter of every event belonging to @system. */
static void filter_free_subsystem_filters(struct event_subsystem *system)
{
	struct ftrace_event_call *call;

	list_for_each_entry(call, &ftrace_events, list) {
		if (strcmp(call->class->system, system->name) != 0)
			continue;
		__free_filter(call->filter);
		call->filter = NULL;
	}
}
864
/*
 * Install @pred into the next free slot of @filter's predicate array
 * and link it into the tree via @stack.  Returns 0, -ENOSPC if the
 * array is full (should not happen: it was sized by count_preds()),
 * or an error from filter_set_pred().
 */
static int filter_add_pred(struct filter_parse_state *ps,
			   struct event_filter *filter,
			   struct filter_pred *pred,
			   struct pred_stack *stack)
{
	int err;

	if (WARN_ON(filter->n_preds == filter->a_preds)) {
		parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
		return -ENOSPC;
	}

	err = filter_set_pred(filter, filter->n_preds, stack, pred);
	if (err)
		return err;

	filter->n_preds++;

	return 0;
}
885
886 int filter_assign_type(const char *type)
887 {
888 if (strstr(type, "__data_loc") && strstr(type, "char"))
889 return FILTER_DYN_STRING;
890
891 if (strchr(type, '[') && strstr(type, "char"))
892 return FILTER_STATIC_STRING;
893
894 return FILTER_OTHER;
895 }
896
/* True when the field holds text of any flavor rather than an integer. */
static bool is_string_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_DYN_STRING ||
	       field->filter_type == FILTER_STATIC_STRING ||
	       field->filter_type == FILTER_PTR_STRING;
}
903
904 static int is_legal_op(struct ftrace_event_field *field, int op)
905 {
906 if (is_string_field(field) &&
907 (op != OP_EQ && op != OP_NE && op != OP_GLOB))
908 return 0;
909 if (!is_string_field(field) && op == OP_GLOB)
910 return 0;
911
912 return 1;
913 }
914
/*
 * Pick the predicate callback for an integer field: equality tests
 * use the signedness-agnostic filter_pred_<bits> helpers, ordering
 * tests need the signed/unsigned variant matching the field.
 * Returns NULL for an unsupported field size.
 */
static filter_pred_fn_t select_comparison_fn(int op, int field_size,
					     int field_is_signed)
{
	filter_pred_fn_t fn = NULL;

	switch (field_size) {
	case 8:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_64;
		else if (field_is_signed)
			fn = filter_pred_s64;
		else
			fn = filter_pred_u64;
		break;
	case 4:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_32;
		else if (field_is_signed)
			fn = filter_pred_s32;
		else
			fn = filter_pred_u32;
		break;
	case 2:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_16;
		else if (field_is_signed)
			fn = filter_pred_s16;
		else
			fn = filter_pred_u16;
		break;
	case 1:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_8;
		else if (field_is_signed)
			fn = filter_pred_s8;
		else
			fn = filter_pred_u8;
		break;
	}

	return fn;
}
957
/*
 * Finish setting up a leaf predicate: validate the op against the
 * field type, then install the matching callback - a regex matcher
 * for string fields, or a sized integer comparison (parsing the
 * operand text into pred->val) for everything else.
 * Returns 0, or -EINVAL with a parse_error() recorded.
 */
static int init_pred(struct filter_parse_state *ps,
		     struct ftrace_event_field *field,
		     struct filter_pred *pred)

{
	filter_pred_fn_t fn = filter_pred_none;
	unsigned long long val;
	int ret;

	pred->offset = field->offset;

	if (!is_legal_op(field, pred->op)) {
		parse_error(ps, FILT_ERR_ILLEGAL_FIELD_OP, 0);
		return -EINVAL;
	}

	if (is_string_field(field)) {
		filter_build_regex(pred);

		if (field->filter_type == FILTER_STATIC_STRING) {
			fn = filter_pred_string;
			pred->regex.field_len = field->size;
		} else if (field->filter_type == FILTER_DYN_STRING)
			fn = filter_pred_strloc;
		else
			fn = filter_pred_pchar;
	} else {
		/* NOTE(review): strict_strto*() are the old names for
		 * kstrto*() - consider converting when the rest of the
		 * file moves to the newer API. */
		if (field->is_signed)
			ret = strict_strtoll(pred->regex.pattern, 0, &val);
		else
			ret = strict_strtoull(pred->regex.pattern, 0, &val);
		if (ret) {
			parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
			return -EINVAL;
		}
		pred->val = val;

		fn = select_comparison_fn(pred->op, field->size,
					  field->is_signed);
		if (!fn) {
			parse_error(ps, FILT_ERR_INVALID_OP, 0);
			return -EINVAL;
		}
	}

	/* "!=" is implemented as "==" with the result inverted. */
	if (pred->op == OP_NE)
		pred->not = 1;

	pred->fn = fn;
	return 0;
}
1009
1010 static void parse_init(struct filter_parse_state *ps,
1011 struct filter_op *ops,
1012 char *infix_string)
1013 {
1014 memset(ps, '\0', sizeof(*ps));
1015
1016 ps->infix.string = infix_string;
1017 ps->infix.cnt = strlen(infix_string);
1018 ps->ops = ops;
1019
1020 INIT_LIST_HEAD(&ps->opstack);
1021 INIT_LIST_HEAD(&ps->postfix);
1022 }
1023
/* Consume and return the next character of the infix input. */
static char infix_next(struct filter_parse_state *ps)
{
	ps->infix.cnt--;

	return ps->infix.string[ps->infix.tail++];
}

/* Look at the next character without consuming it; 0 at end of input. */
static char infix_peek(struct filter_parse_state *ps)
{
	if (ps->infix.tail == strlen(ps->infix.string))
		return 0;

	return ps->infix.string[ps->infix.tail];
}

/* Consume one character (used after infix_peek() accepted it). */
static void infix_advance(struct filter_parse_state *ps)
{
	ps->infix.cnt--;
	ps->infix.tail++;
}
1044
1045 static inline int is_precedence_lower(struct filter_parse_state *ps,
1046 int a, int b)
1047 {
1048 return ps->ops[a].precedence < ps->ops[b].precedence;
1049 }
1050
/*
 * Could @c start an operator?  Scans the op table (terminated by the
 * "OP_NONE" string sentinel) comparing first characters only.
 */
static inline int is_op_char(struct filter_parse_state *ps, char c)
{
	int i;

	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
		if (ps->ops[i].string[0] == c)
			return 1;
	}

	return 0;
}
1062
/*
 * Resolve the operator starting with @firstc.  Two-character operators
 * ("==", "&&", ...) are tried first, consuming the peeked character on
 * a hit; then single-character operators.  Returns the op id, or
 * OP_NONE if nothing in the table matches.
 */
static int infix_get_op(struct filter_parse_state *ps, char firstc)
{
	char nextc = infix_peek(ps);
	char opstr[3];
	int i;

	opstr[0] = firstc;
	opstr[1] = nextc;
	opstr[2] = '\0';

	/* Try the two-character form first so "<=" is not read as "<". */
	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
		if (!strcmp(opstr, ps->ops[i].string)) {
			infix_advance(ps);
			return ps->ops[i].id;
		}
	}

	opstr[1] = '\0';

	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
		if (!strcmp(opstr, ps->ops[i].string))
			return ps->ops[i].id;
	}

	return OP_NONE;
}
1089
/* Reset the operand accumulator to an empty string. */
static inline void clear_operand_string(struct filter_parse_state *ps)
{
	memset(ps->operand.string, '\0', MAX_FILTER_STR_VAL);
	ps->operand.tail = 0;
}

/*
 * Append @c to the operand accumulator.  Returns -EINVAL when the
 * operand would no longer fit (one byte is reserved for the '\0').
 */
static inline int append_operand_char(struct filter_parse_state *ps, char c)
{
	if (ps->operand.tail == MAX_FILTER_STR_VAL - 1)
		return -EINVAL;

	ps->operand.string[ps->operand.tail++] = c;

	return 0;
}
1105
/* Push @op onto the parser's operator stack.  Returns 0 or -ENOMEM. */
static int filter_opstack_push(struct filter_parse_state *ps, int op)
{
	struct opstack_op *opstack_op;

	opstack_op = kmalloc(sizeof(*opstack_op), GFP_KERNEL);
	if (!opstack_op)
		return -ENOMEM;

	opstack_op->op = op;
	/* list_add() at the head makes ->opstack behave as a LIFO. */
	list_add(&opstack_op->list, &ps->opstack);

	return 0;
}

/* True when the operator stack has no entries. */
static int filter_opstack_empty(struct filter_parse_state *ps)
{
	return list_empty(&ps->opstack);
}

/* Peek at the top operator without popping; OP_NONE when empty. */
static int filter_opstack_top(struct filter_parse_state *ps)
{
	struct opstack_op *opstack_op;

	if (filter_opstack_empty(ps))
		return OP_NONE;

	opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);

	return opstack_op->op;
}

/* Pop and return the top operator, freeing its entry; OP_NONE when empty. */
static int filter_opstack_pop(struct filter_parse_state *ps)
{
	struct opstack_op *opstack_op;
	int op;

	if (filter_opstack_empty(ps))
		return OP_NONE;

	opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
	op = opstack_op->op;
	list_del(&opstack_op->list);

	kfree(opstack_op);

	return op;
}

/* Discard every operator remaining on the stack. */
static void filter_opstack_clear(struct filter_parse_state *ps)
{
	while (!filter_opstack_empty(ps))
		filter_opstack_pop(ps);
}
1159
/* The operand text accumulated so far (may be the empty string). */
static char *curr_operand(struct filter_parse_state *ps)
{
	return ps->operand.string;
}

/*
 * Append a copy of @operand to the postfix output list (op == OP_NONE
 * marks the element as an operand).  Returns 0 or -ENOMEM.
 */
static int postfix_append_operand(struct filter_parse_state *ps, char *operand)
{
	struct postfix_elt *elt;

	elt = kmalloc(sizeof(*elt), GFP_KERNEL);
	if (!elt)
		return -ENOMEM;

	elt->op = OP_NONE;
	elt->operand = kstrdup(operand, GFP_KERNEL);
	if (!elt->operand) {
		kfree(elt);
		return -ENOMEM;
	}

	list_add_tail(&elt->list, &ps->postfix);

	return 0;
}

/* Append operator @op to the postfix output list.  Returns 0 or -ENOMEM. */
static int postfix_append_op(struct filter_parse_state *ps, int op)
{
	struct postfix_elt *elt;

	elt = kmalloc(sizeof(*elt), GFP_KERNEL);
	if (!elt)
		return -ENOMEM;

	elt->op = op;
	elt->operand = NULL;

	list_add_tail(&elt->list, &ps->postfix);

	return 0;
}

/* Free the entire postfix list, including the duplicated operand strings. */
static void postfix_clear(struct filter_parse_state *ps)
{
	struct postfix_elt *elt;

	while (!list_empty(&ps->postfix)) {
		elt = list_first_entry(&ps->postfix, struct postfix_elt, list);
		list_del(&elt->list);
		kfree(elt->operand);
		kfree(elt);
	}
}
1212
/*
 * Shunting-yard conversion of ps->infix.string into a postfix list in
 * ps->postfix.  Double quotes protect operand text from being read as
 * operators; whitespace separates tokens; parentheses must balance.
 * Returns 0, or -EINVAL with a parse_error() recorded.
 *
 * NOTE(review): the -ENOMEM returns of postfix_append_operand(),
 * postfix_append_op() and filter_opstack_push() are ignored here;
 * an allocation failure degrades into a truncated expression.
 */
static int filter_parse(struct filter_parse_state *ps)
{
	int in_string = 0;
	int op, top_op;
	char ch;

	while ((ch = infix_next(ps))) {
		/* Quotes toggle "raw operand" mode. */
		if (ch == '"') {
			in_string ^= 1;
			continue;
		}

		if (in_string)
			goto parse_operand;

		if (isspace(ch))
			continue;

		if (is_op_char(ps, ch)) {
			op = infix_get_op(ps, ch);
			if (op == OP_NONE) {
				parse_error(ps, FILT_ERR_INVALID_OP, 0);
				return -EINVAL;
			}

			/* An operator terminates any pending operand. */
			if (strlen(curr_operand(ps))) {
				postfix_append_operand(ps, curr_operand(ps));
				clear_operand_string(ps);
			}

			/* Flush stacked ops of equal or higher precedence. */
			while (!filter_opstack_empty(ps)) {
				top_op = filter_opstack_top(ps);
				if (!is_precedence_lower(ps, top_op, op)) {
					top_op = filter_opstack_pop(ps);
					postfix_append_op(ps, top_op);
					continue;
				}
				break;
			}

			filter_opstack_push(ps, op);
			continue;
		}

		if (ch == '(') {
			filter_opstack_push(ps, OP_OPEN_PAREN);
			continue;
		}

		if (ch == ')') {
			if (strlen(curr_operand(ps))) {
				postfix_append_operand(ps, curr_operand(ps));
				clear_operand_string(ps);
			}

			/* Pop ops back to the matching open paren. */
			top_op = filter_opstack_pop(ps);
			while (top_op != OP_NONE) {
				if (top_op == OP_OPEN_PAREN)
					break;
				postfix_append_op(ps, top_op);
				top_op = filter_opstack_pop(ps);
			}
			if (top_op == OP_NONE) {
				parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
				return -EINVAL;
			}
			continue;
		}
parse_operand:
		if (append_operand_char(ps, ch)) {
			parse_error(ps, FILT_ERR_OPERAND_TOO_LONG, 0);
			return -EINVAL;
		}
	}

	/* Flush the trailing operand and any still-stacked operators. */
	if (strlen(curr_operand(ps)))
		postfix_append_operand(ps, curr_operand(ps));

	while (!filter_opstack_empty(ps)) {
		top_op = filter_opstack_pop(ps);
		if (top_op == OP_NONE)
			break;
		if (top_op == OP_OPEN_PAREN) {
			parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
			return -EINVAL;
		}
		postfix_append_op(ps, top_op);
	}

	return 0;
}
1304
/*
 * Build a predicate for one postfix element.  Logical ops need no
 * operands; comparisons resolve operand1 to an event field and take
 * operand2 as the value/pattern.  Returns NULL with a parse_error()
 * recorded on failure.
 *
 * The returned pointer refers to a function-static scratch pred that
 * the caller must copy before the next call; concurrent use is
 * presumably serialized by event_mutex - callers should confirm.
 */
static struct filter_pred *create_pred(struct filter_parse_state *ps,
				       struct ftrace_event_call *call,
				       int op, char *operand1, char *operand2)
{
	struct ftrace_event_field *field;
	static struct filter_pred pred;

	memset(&pred, 0, sizeof(pred));
	pred.op = op;

	if (op == OP_AND || op == OP_OR)
		return &pred;

	if (!operand1 || !operand2) {
		parse_error(ps, FILT_ERR_MISSING_FIELD, 0);
		return NULL;
	}

	field = find_event_field(call, operand1);
	if (!field) {
		parse_error(ps, FILT_ERR_FIELD_NOT_FOUND, 0);
		return NULL;
	}

	/* operand2 is bounded by MAX_FILTER_STR_VAL from the parser. */
	strcpy(pred.regex.pattern, operand2);
	pred.regex.len = strlen(pred.regex.pattern);

#ifdef CONFIG_FTRACE_STARTUP_TEST
	pred.field = field;
#endif
	return init_pred(ps, field, &pred) ? NULL : &pred;
}
1337
1338 static int check_preds(struct filter_parse_state *ps)
1339 {
1340 int n_normal_preds = 0, n_logical_preds = 0;
1341 struct postfix_elt *elt;
1342
1343 list_for_each_entry(elt, &ps->postfix, list) {
1344 if (elt->op == OP_NONE)
1345 continue;
1346
1347 if (elt->op == OP_AND || elt->op == OP_OR) {
1348 n_logical_preds++;
1349 continue;
1350 }
1351 n_normal_preds++;
1352 }
1353
1354 if (!n_normal_preds || n_logical_preds >= n_normal_preds) {
1355 parse_error(ps, FILT_ERR_INVALID_FILTER, 0);
1356 return -EINVAL;
1357 }
1358
1359 return 0;
1360 }
1361
1362 static int count_preds(struct filter_parse_state *ps)
1363 {
1364 struct postfix_elt *elt;
1365 int n_preds = 0;
1366
1367 list_for_each_entry(elt, &ps->postfix, list) {
1368 if (elt->op == OP_NONE)
1369 continue;
1370 n_preds++;
1371 }
1372
1373 return n_preds;
1374 }
1375
/*
 * Bookkeeping for check_pred_tree_cb(): "count" is the number of nodes
 * visited so far, "max" the visit budget derived from the number of
 * predicates in the filter.
 */
struct check_pred_data {
        int count;
        int max;
};
1380
1381 static int check_pred_tree_cb(enum move_type move, struct filter_pred *pred,
1382 int *err, void *data)
1383 {
1384 struct check_pred_data *d = data;
1385
1386 if (WARN_ON(d->count++ > d->max)) {
1387 *err = -EINVAL;
1388 return WALK_PRED_ABORT;
1389 }
1390 return WALK_PRED_DEFAULT;
1391 }
1392
1393 /*
1394 * The tree is walked at filtering of an event. If the tree is not correctly
1395 * built, it may cause an infinite loop. Check here that the tree does
1396 * indeed terminate.
1397 */
1398 static int check_pred_tree(struct event_filter *filter,
1399 struct filter_pred *root)
1400 {
1401 struct check_pred_data data = {
1402 /*
1403 * The max that we can hit a node is three times.
1404 * Once going down, once coming up from left, and
1405 * once coming up from right. This is more than enough
1406 * since leafs are only hit a single time.
1407 */
1408 .max = 3 * filter->n_preds,
1409 .count = 0,
1410 };
1411
1412 return walk_pred_tree(filter->preds, root,
1413 check_pred_tree_cb, &data);
1414 }
1415
1416 static int count_leafs_cb(enum move_type move, struct filter_pred *pred,
1417 int *err, void *data)
1418 {
1419 int *count = data;
1420
1421 if ((move == MOVE_DOWN) &&
1422 (pred->left == FILTER_PRED_INVALID))
1423 (*count)++;
1424
1425 return WALK_PRED_DEFAULT;
1426 }
1427
1428 static int count_leafs(struct filter_pred *preds, struct filter_pred *root)
1429 {
1430 int count = 0, ret;
1431
1432 ret = walk_pred_tree(preds, root, count_leafs_cb, &count);
1433 WARN_ON(ret);
1434 return count;
1435 }
1436
/*
 * State for fold_pred_cb(): "root" is the node whose ops[] array is
 * being filled, "count" the next free slot, and "children" the number
 * of leafs counted beforehand (the array's capacity).
 */
struct fold_pred_data {
        struct filter_pred *root;
        int count;
        int children;
};
1442
1443 static int fold_pred_cb(enum move_type move, struct filter_pred *pred,
1444 int *err, void *data)
1445 {
1446 struct fold_pred_data *d = data;
1447 struct filter_pred *root = d->root;
1448
1449 if (move != MOVE_DOWN)
1450 return WALK_PRED_DEFAULT;
1451 if (pred->left != FILTER_PRED_INVALID)
1452 return WALK_PRED_DEFAULT;
1453
1454 if (WARN_ON(d->count == d->children)) {
1455 *err = -EINVAL;
1456 return WALK_PRED_ABORT;
1457 }
1458
1459 pred->index &= ~FILTER_PRED_FOLD;
1460 root->ops[d->count++] = pred->index;
1461 return WALK_PRED_DEFAULT;
1462 }
1463
1464 static int fold_pred(struct filter_pred *preds, struct filter_pred *root)
1465 {
1466 struct fold_pred_data data = {
1467 .root = root,
1468 .count = 0,
1469 };
1470 int children;
1471
1472 /* No need to keep the fold flag */
1473 root->index &= ~FILTER_PRED_FOLD;
1474
1475 /* If the root is a leaf then do nothing */
1476 if (root->left == FILTER_PRED_INVALID)
1477 return 0;
1478
1479 /* count the children */
1480 children = count_leafs(preds, &preds[root->left]);
1481 children += count_leafs(preds, &preds[root->right]);
1482
1483 root->ops = kzalloc(sizeof(*root->ops) * children, GFP_KERNEL);
1484 if (!root->ops)
1485 return -ENOMEM;
1486
1487 root->val = children;
1488 data.children = children;
1489 return walk_pred_tree(preds, root, fold_pred_cb, &data);
1490 }
1491
/*
 * Tree-walk callback: when a node carries FILTER_PRED_FOLD, flatten
 * the subtree below it into that node's ops[] array via fold_pred().
 */
static int fold_pred_tree_cb(enum move_type move, struct filter_pred *pred,
                             int *err, void *data)
{
        struct filter_pred *preds = data;

        if (move != MOVE_DOWN)
                return WALK_PRED_DEFAULT;
        if (!(pred->index & FILTER_PRED_FOLD))
                return WALK_PRED_DEFAULT;

        *err = fold_pred(preds, pred);
        if (*err)
                return WALK_PRED_ABORT;

        /* everything below is folded, continue with parent */
        return WALK_PRED_PARENT;
}
1509
1510 /*
1511 * To optimize the processing of the ops, if we have several "ors" or
1512 * "ands" together, we can put them in an array and process them all
1513 * together speeding up the filter logic.
1514 */
1515 static int fold_pred_tree(struct event_filter *filter,
1516 struct filter_pred *root)
1517 {
1518 return walk_pred_tree(filter->preds, root, fold_pred_tree_cb,
1519 filter->preds);
1520 }
1521
1522 static int replace_preds(struct ftrace_event_call *call,
1523 struct event_filter *filter,
1524 struct filter_parse_state *ps,
1525 char *filter_string,
1526 bool dry_run)
1527 {
1528 char *operand1 = NULL, *operand2 = NULL;
1529 struct filter_pred *pred;
1530 struct filter_pred *root;
1531 struct postfix_elt *elt;
1532 struct pred_stack stack = { }; /* init to NULL */
1533 int err;
1534 int n_preds = 0;
1535
1536 n_preds = count_preds(ps);
1537 if (n_preds >= MAX_FILTER_PRED) {
1538 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1539 return -ENOSPC;
1540 }
1541
1542 err = check_preds(ps);
1543 if (err)
1544 return err;
1545
1546 if (!dry_run) {
1547 err = __alloc_pred_stack(&stack, n_preds);
1548 if (err)
1549 return err;
1550 err = __alloc_preds(filter, n_preds);
1551 if (err)
1552 goto fail;
1553 }
1554
1555 n_preds = 0;
1556 list_for_each_entry(elt, &ps->postfix, list) {
1557 if (elt->op == OP_NONE) {
1558 if (!operand1)
1559 operand1 = elt->operand;
1560 else if (!operand2)
1561 operand2 = elt->operand;
1562 else {
1563 parse_error(ps, FILT_ERR_TOO_MANY_OPERANDS, 0);
1564 err = -EINVAL;
1565 goto fail;
1566 }
1567 continue;
1568 }
1569
1570 if (WARN_ON(n_preds++ == MAX_FILTER_PRED)) {
1571 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1572 err = -ENOSPC;
1573 goto fail;
1574 }
1575
1576 pred = create_pred(ps, call, elt->op, operand1, operand2);
1577 if (!pred) {
1578 err = -EINVAL;
1579 goto fail;
1580 }
1581
1582 if (!dry_run) {
1583 err = filter_add_pred(ps, filter, pred, &stack);
1584 if (err)
1585 goto fail;
1586 }
1587
1588 operand1 = operand2 = NULL;
1589 }
1590
1591 if (!dry_run) {
1592 /* We should have one item left on the stack */
1593 pred = __pop_pred_stack(&stack);
1594 if (!pred)
1595 return -EINVAL;
1596 /* This item is where we start from in matching */
1597 root = pred;
1598 /* Make sure the stack is empty */
1599 pred = __pop_pred_stack(&stack);
1600 if (WARN_ON(pred)) {
1601 err = -EINVAL;
1602 filter->root = NULL;
1603 goto fail;
1604 }
1605 err = check_pred_tree(filter, root);
1606 if (err)
1607 goto fail;
1608
1609 /* Optimize the tree */
1610 err = fold_pred_tree(filter, root);
1611 if (err)
1612 goto fail;
1613
1614 /* We don't set root until we know it works */
1615 barrier();
1616 filter->root = root;
1617 }
1618
1619 err = 0;
1620 fail:
1621 __free_pred_stack(&stack);
1622 return err;
1623 }
1624
/*
 * Node in the temporary list replace_system_preds() builds to hold
 * displaced per-event filters until it is safe to free them.
 */
struct filter_list {
        struct list_head list;
        struct event_filter *filter;
};
1629
/*
 * Apply @filter_string to every event belonging to @system.
 *
 * Pass 1 dry-runs replace_preds() against each event to verify the
 * filter parses for all of them.  Pass 2 allocates a new filter per
 * event, swaps it in, and parks the old filter on a local list; the
 * old filters are freed only after a synchronize_sched() grace period
 * so no tracer can still be using them.
 */
static int replace_system_preds(struct event_subsystem *system,
                                struct filter_parse_state *ps,
                                char *filter_string)
{
        struct ftrace_event_call *call;
        struct filter_list *filter_item;
        struct filter_list *tmp;
        LIST_HEAD(filter_list);
        bool fail = true;
        int err;

        list_for_each_entry(call, &ftrace_events, list) {

                if (strcmp(call->class->system, system->name) != 0)
                        continue;

                /*
                 * Try to see if the filter can be applied
                 *  (filter arg is ignored on dry_run)
                 */
                err = replace_preds(call, NULL, ps, filter_string, true);
                if (err)
                        goto fail;
        }

        list_for_each_entry(call, &ftrace_events, list) {
                struct event_filter *filter;

                if (strcmp(call->class->system, system->name) != 0)
                        continue;

                filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
                if (!filter_item)
                        goto fail_mem;

                list_add_tail(&filter_item->list, &filter_list);

                filter_item->filter = __alloc_filter();
                if (!filter_item->filter)
                        goto fail_mem;
                filter = filter_item->filter;

                /* Can only fail on no memory */
                err = replace_filter_string(filter, filter_string);
                if (err)
                        goto fail_mem;

                /* On error: disable this event but keep going. */
                err = replace_preds(call, filter, ps, filter_string, false);
                if (err) {
                        filter_disable(call);
                        parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
                        append_filter_err(ps, filter);
                } else
                        call->flags |= TRACE_EVENT_FL_FILTERED;
                /*
                 * Regardless of if this returned an error, we still
                 * replace the filter for the call.
                 */
                filter = call->filter;
                call->filter = filter_item->filter;
                filter_item->filter = filter;

                fail = false;
        }

        if (fail)
                goto fail;

        /*
         * The calls can still be using the old filters.
         * Do a synchronize_sched() to ensure all calls are
         * done with them before we free them.
         */
        synchronize_sched();
        list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
                __free_filter(filter_item->filter);
                list_del(&filter_item->list);
                kfree(filter_item);
        }
        return 0;
fail:
        /* No call succeeded */
        list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
                list_del(&filter_item->list);
                kfree(filter_item);
        }
        parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
        return -EINVAL;
fail_mem:
        /* If any call succeeded, we still need to sync */
        if (!fail)
                synchronize_sched();
        list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
                __free_filter(filter_item->filter);
                list_del(&filter_item->list);
                kfree(filter_item);
        }
        return -ENOMEM;
}
1729
/*
 * Set or clear the filter of a single event.
 *
 * A filter string of "0" clears: filtering is disabled and the old
 * filter is freed after a synchronize_sched() grace period.  Any other
 * string is parsed; even on failure the (error-carrying) new filter is
 * swapped in so userspace can read the error text, while the event's
 * filtering is disabled.
 *
 * @filter_string must be writable: it is strstrip()ed in place.
 * Returns 0 on success, negative errno on failure.
 */
int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
{
        struct filter_parse_state *ps;
        struct event_filter *filter;
        struct event_filter *tmp;
        int err = 0;

        mutex_lock(&event_mutex);

        if (!strcmp(strstrip(filter_string), "0")) {
                filter_disable(call);
                filter = call->filter;
                if (!filter)
                        goto out_unlock;
                call->filter = NULL;
                /* Make sure the filter is not being used */
                synchronize_sched();
                __free_filter(filter);
                goto out_unlock;
        }

        err = -ENOMEM;
        ps = kzalloc(sizeof(*ps), GFP_KERNEL);
        if (!ps)
                goto out_unlock;

        filter = __alloc_filter();
        if (!filter) {
                kfree(ps);
                goto out_unlock;
        }

        replace_filter_string(filter, filter_string);

        parse_init(ps, filter_ops, filter_string);
        err = filter_parse(ps);
        if (err) {
                append_filter_err(ps, filter);
                goto out;
        }

        err = replace_preds(call, filter, ps, filter_string, false);
        if (err) {
                filter_disable(call);
                append_filter_err(ps, filter);
        } else
                call->flags |= TRACE_EVENT_FL_FILTERED;
out:
        /*
         * Always swap the call filter with the new filter
         * even if there was an error. If there was an error
         * in the filter, we disable the filter and show the error
         * string
         */
        tmp = call->filter;
        call->filter = filter;
        if (tmp) {
                /* Make sure the call is done with the filter */
                synchronize_sched();
                __free_filter(tmp);
        }
        filter_opstack_clear(ps);
        postfix_clear(ps);
        kfree(ps);
out_unlock:
        mutex_unlock(&event_mutex);

        return err;
}
1799
/*
 * Set or clear the filter shared by all events of @system.
 *
 * "0" clears: per-event preds are dropped first, then after a
 * synchronize_sched() grace period the per-event filters and the
 * system filter itself are freed.  Any other string is parsed and
 * pushed down to every event via replace_system_preds().
 *
 * @filter_string must be writable: it is strstrip()ed in place.
 */
int apply_subsystem_event_filter(struct event_subsystem *system,
                                 char *filter_string)
{
        struct filter_parse_state *ps;
        struct event_filter *filter;
        int err = 0;

        mutex_lock(&event_mutex);

        /* Make sure the system still has events */
        if (!system->nr_events) {
                err = -ENODEV;
                goto out_unlock;
        }

        if (!strcmp(strstrip(filter_string), "0")) {
                filter_free_subsystem_preds(system);
                remove_filter_string(system->filter);
                filter = system->filter;
                system->filter = NULL;
                /* Ensure all filters are no longer used */
                synchronize_sched();
                filter_free_subsystem_filters(system);
                __free_filter(filter);
                goto out_unlock;
        }

        err = -ENOMEM;
        ps = kzalloc(sizeof(*ps), GFP_KERNEL);
        if (!ps)
                goto out_unlock;

        filter = __alloc_filter();
        /*
         * NOTE(review): on allocation failure this jumps to "out" and
         * runs filter_opstack_clear()/postfix_clear() on a ps that has
         * not been through parse_init() (only kzalloc'd) — verify the
         * clear helpers tolerate a zeroed ps.
         */
        if (!filter)
                goto out;

        replace_filter_string(filter, filter_string);
        /*
         * No event actually uses the system filter
         * we can free it without synchronize_sched().
         */
        __free_filter(system->filter);
        system->filter = filter;

        parse_init(ps, filter_ops, filter_string);
        err = filter_parse(ps);
        if (err) {
                append_filter_err(ps, system->filter);
                goto out;
        }

        err = replace_system_preds(system, ps, filter_string);
        if (err)
                append_filter_err(ps, system->filter);

out:
        filter_opstack_clear(ps);
        postfix_clear(ps);
        kfree(ps);
out_unlock:
        mutex_unlock(&event_mutex);

        return err;
}
1864
1865 #ifdef CONFIG_PERF_EVENTS
1866
1867 void ftrace_profile_free_filter(struct perf_event *event)
1868 {
1869 struct event_filter *filter = event->filter;
1870
1871 event->filter = NULL;
1872 __free_filter(filter);
1873 }
1874
1875 int ftrace_profile_set_filter(struct perf_event *event, int event_id,
1876 char *filter_str)
1877 {
1878 int err;
1879 struct event_filter *filter;
1880 struct filter_parse_state *ps;
1881 struct ftrace_event_call *call;
1882
1883 mutex_lock(&event_mutex);
1884
1885 call = event->tp_event;
1886
1887 err = -EINVAL;
1888 if (!call)
1889 goto out_unlock;
1890
1891 err = -EEXIST;
1892 if (event->filter)
1893 goto out_unlock;
1894
1895 filter = __alloc_filter();
1896 if (!filter) {
1897 err = PTR_ERR(filter);
1898 goto out_unlock;
1899 }
1900
1901 err = -ENOMEM;
1902 ps = kzalloc(sizeof(*ps), GFP_KERNEL);
1903 if (!ps)
1904 goto free_filter;
1905
1906 parse_init(ps, filter_ops, filter_str);
1907 err = filter_parse(ps);
1908 if (err)
1909 goto free_ps;
1910
1911 err = replace_preds(call, filter, ps, filter_str, false);
1912 if (!err)
1913 event->filter = filter;
1914
1915 free_ps:
1916 filter_opstack_clear(ps);
1917 postfix_clear(ps);
1918 kfree(ps);
1919
1920 free_filter:
1921 if (err)
1922 __free_filter(filter);
1923
1924 out_unlock:
1925 mutex_unlock(&event_mutex);
1926
1927 return err;
1928 }
1929
1930 #endif /* CONFIG_PERF_EVENTS */
1931
1932 #ifdef CONFIG_FTRACE_STARTUP_TEST
1933
1934 #include <linux/types.h>
1935 #include <linux/tracepoint.h>
1936
1937 #define CREATE_TRACE_POINTS
1938 #include "trace_events_filter_test.h"
1939
1940 static int test_get_filter(char *filter_str, struct ftrace_event_call *call,
1941 struct event_filter **pfilter)
1942 {
1943 struct event_filter *filter;
1944 struct filter_parse_state *ps;
1945 int err = -ENOMEM;
1946
1947 filter = __alloc_filter();
1948 if (!filter)
1949 goto out;
1950
1951 ps = kzalloc(sizeof(*ps), GFP_KERNEL);
1952 if (!ps)
1953 goto free_filter;
1954
1955 parse_init(ps, filter_ops, filter_str);
1956 err = filter_parse(ps);
1957 if (err)
1958 goto free_ps;
1959
1960 err = replace_preds(call, filter, ps, filter_str, false);
1961 if (!err)
1962 *pfilter = filter;
1963
1964 free_ps:
1965 filter_opstack_clear(ps);
1966 postfix_clear(ps);
1967 kfree(ps);
1968
1969 free_filter:
1970 if (err)
1971 __free_filter(filter);
1972
1973 out:
1974 return err;
1975 }
1976
/*
 * DATA_REC - one self-test case: the filter under test (the FILTER
 * macro in effect), a synthetic record, the expected match result, and
 * a string naming fields whose predicates must NOT be evaluated
 * (proving the short-circuit logic skips them).
 */
#define DATA_REC(m, va, vb, vc, vd, ve, vf, vg, vh, nvisit) \
{ \
        .filter = FILTER, \
        .rec = { .a = va, .b = vb, .c = vc, .d = vd, \
                 .e = ve, .f = vf, .g = vg, .h = vh }, \
        .match = m, \
        .not_visited = nvisit, \
}
#define YES 1
#define NO 0

static struct test_filter_data_t {
        char *filter;           /* filter string fed to the parser */
        struct ftrace_raw_ftrace_test_filter rec;       /* record to match */
        int match;              /* expected filter_match_preds() result */
        char *not_visited;      /* fields that must not be visited */
} test_filter_data[] = {
#define FILTER "a == 1 && b == 1 && c == 1 && d == 1 && " \
               "e == 1 && f == 1 && g == 1 && h == 1"
        DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, ""),
        DATA_REC(NO, 0, 1, 1, 1, 1, 1, 1, 1, "bcdefgh"),
        DATA_REC(NO, 1, 1, 1, 1, 1, 1, 1, 0, ""),
#undef FILTER
#define FILTER "a == 1 || b == 1 || c == 1 || d == 1 || " \
               "e == 1 || f == 1 || g == 1 || h == 1"
        DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""),
        DATA_REC(YES, 0, 0, 0, 0, 0, 0, 0, 1, ""),
        DATA_REC(YES, 1, 0, 0, 0, 0, 0, 0, 0, "bcdefgh"),
#undef FILTER
#define FILTER "(a == 1 || b == 1) && (c == 1 || d == 1) && " \
               "(e == 1 || f == 1) && (g == 1 || h == 1)"
        DATA_REC(NO, 0, 0, 1, 1, 1, 1, 1, 1, "dfh"),
        DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
        DATA_REC(YES, 1, 0, 1, 0, 0, 1, 0, 1, "bd"),
        DATA_REC(NO, 1, 0, 1, 0, 0, 1, 0, 0, "bd"),
#undef FILTER
#define FILTER "(a == 1 && b == 1) || (c == 1 && d == 1) || " \
               "(e == 1 && f == 1) || (g == 1 && h == 1)"
        DATA_REC(YES, 1, 0, 1, 1, 1, 1, 1, 1, "efgh"),
        DATA_REC(YES, 0, 0, 0, 0, 0, 0, 1, 1, ""),
        DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""),
#undef FILTER
#define FILTER "(a == 1 && b == 1) && (c == 1 && d == 1) && " \
               "(e == 1 && f == 1) || (g == 1 && h == 1)"
        DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 0, "gh"),
        DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""),
        DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, ""),
#undef FILTER
#define FILTER "((a == 1 || b == 1) || (c == 1 || d == 1) || " \
               "(e == 1 || f == 1)) && (g == 1 || h == 1)"
        DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 1, "bcdef"),
        DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""),
        DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, "h"),
#undef FILTER
#define FILTER "((((((((a == 1) && (b == 1)) || (c == 1)) && (d == 1)) || " \
               "(e == 1)) && (f == 1)) || (g == 1)) && (h == 1))"
        DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "ceg"),
        DATA_REC(NO, 0, 1, 0, 1, 0, 1, 0, 1, ""),
        DATA_REC(NO, 1, 0, 1, 0, 1, 0, 1, 0, ""),
#undef FILTER
#define FILTER "((((((((a == 1) || (b == 1)) && (c == 1)) || (d == 1)) && " \
               "(e == 1)) || (f == 1)) && (g == 1)) || (h == 1))"
        DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "bdfh"),
        DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
        DATA_REC(YES, 1, 0, 1, 0, 1, 0, 1, 0, "bdfh"),
};

#undef DATA_REC
#undef FILTER
#undef YES
#undef NO

/* Number of self-test cases in test_filter_data[]. */
#define DATA_CNT (sizeof(test_filter_data)/sizeof(struct test_filter_data_t))
2050
2051 static int test_pred_visited;
2052
2053 static int test_pred_visited_fn(struct filter_pred *pred, void *event)
2054 {
2055 struct ftrace_event_field *field = pred->field;
2056
2057 test_pred_visited = 1;
2058 printk(KERN_INFO "\npred visited %s\n", field->name);
2059 return 1;
2060 }
2061
2062 static int test_walk_pred_cb(enum move_type move, struct filter_pred *pred,
2063 int *err, void *data)
2064 {
2065 char *fields = data;
2066
2067 if ((move == MOVE_DOWN) &&
2068 (pred->left == FILTER_PRED_INVALID)) {
2069 struct ftrace_event_field *field = pred->field;
2070
2071 if (!field) {
2072 WARN(1, "all leafs should have field defined");
2073 return WALK_PRED_DEFAULT;
2074 }
2075 if (!strchr(fields, *field->name))
2076 return WALK_PRED_DEFAULT;
2077
2078 WARN_ON(!pred->fn);
2079 pred->fn = test_pred_visited_fn;
2080 }
2081 return WALK_PRED_DEFAULT;
2082 }
2083
/*
 * Startup self-test: for each entry in test_filter_data[], build the
 * filter, plant a canary on the predicates named in ->not_visited
 * (they must NOT be evaluated if short-circuiting works), and check
 * filter_match_preds() returns the expected result.
 */
static __init int ftrace_test_event_filter(void)
{
        int i;

        printk(KERN_INFO "Testing ftrace filter: ");

        for (i = 0; i < DATA_CNT; i++) {
                struct event_filter *filter = NULL;
                struct test_filter_data_t *d = &test_filter_data[i];
                int err;

                err = test_get_filter(d->filter, &event_ftrace_test_filter,
                                      &filter);
                if (err) {
                        printk(KERN_INFO
                               "Failed to get filter for '%s', err %d\n",
                               d->filter, err);
                        break;
                }

                /* Redirect the must-not-run preds to the canary. */
                if (*d->not_visited)
                        walk_pred_tree(filter->preds, filter->root,
                                       test_walk_pred_cb,
                                       d->not_visited);

                test_pred_visited = 0;
                err = filter_match_preds(filter, &d->rec);

                __free_filter(filter);

                if (test_pred_visited) {
                        printk(KERN_INFO
                               "Failed, unwanted pred visited for filter %s\n",
                               d->filter);
                        break;
                }

                if (err != d->match) {
                        printk(KERN_INFO
                               "Failed to match filter '%s', expected %d\n",
                               d->filter, d->match);
                        break;
                }
        }

        /* The loop only runs to completion when every case passed. */
        if (i == DATA_CNT)
                printk(KERN_CONT "OK\n");

        return 0;
}
2134
2135 late_initcall(ftrace_test_event_filter);
2136
2137 #endif /* CONFIG_FTRACE_STARTUP_TEST */