]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - kernel/trace/trace_events_filter.c
ipv4: convert dst_metrics.refcnt from atomic_t to refcount_t
[mirror_ubuntu-artful-kernel.git] / kernel / trace / trace_events_filter.c
1 /*
2 * trace_events_filter - generic event filtering
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
19 */
20
21 #include <linux/module.h>
22 #include <linux/ctype.h>
23 #include <linux/mutex.h>
24 #include <linux/perf_event.h>
25 #include <linux/slab.h>
26
27 #include "trace.h"
28 #include "trace_output.h"
29
/*
 * Message shown when reading a subsystem "filter" file that has no
 * filter set (see print_subsystem_event_filter()).
 */
#define DEFAULT_SYS_FILTER_MESSAGE \
	"### global filter ###\n" \
	"# Use this to set filters for multiple events.\n" \
	"# Only events with the given fields will be affected.\n" \
	"# If no events are modified, an error message will be displayed here"

/* Identifiers for the operators understood by the filter parser. */
enum filter_op_ids
{
	OP_OR,		/* "||" */
	OP_AND,		/* "&&" */
	OP_GLOB,	/* "~"  glob-style string match */
	OP_NE,		/* "!=" */
	OP_EQ,		/* "==" */
	OP_LT,		/* "<"  */
	OP_LE,		/* "<=" */
	OP_GT,		/* ">"  */
	OP_GE,		/* ">=" */
	OP_BAND,	/* "&"  bitwise-AND test */
	OP_NOT,		/* "!"  */
	OP_NONE,	/* sentinel: not an operator */
	OP_OPEN_PAREN,	/* "("  only ever lives on the operator stack */
};

/* One parse-table entry: operator id, its token, its binding strength. */
struct filter_op {
	int id;
	char *string;
	int precedence;		/* higher binds tighter */
};

/* Order must be the same as enum filter_op_ids above */
static struct filter_op filter_ops[] = {
	{ OP_OR, "||", 1 },
	{ OP_AND, "&&", 2 },
	{ OP_GLOB, "~", 4 },
	{ OP_NE, "!=", 4 },
	{ OP_EQ, "==", 4 },
	{ OP_LT, "<", 5 },
	{ OP_LE, "<=", 5 },
	{ OP_GT, ">", 5 },
	{ OP_GE, ">=", 5 },
	{ OP_BAND, "&", 6 },
	{ OP_NOT, "!", 6 },
	{ OP_NONE, "OP_NONE", 0 },
	{ OP_OPEN_PAREN, "(", 0 },
};
75
/*
 * Parse error codes.  Each value indexes the corresponding message in
 * err_text[] below, so the two must be kept in the same order.
 */
enum {
	FILT_ERR_NONE,
	FILT_ERR_INVALID_OP,
	FILT_ERR_UNBALANCED_PAREN,
	FILT_ERR_TOO_MANY_OPERANDS,
	FILT_ERR_OPERAND_TOO_LONG,
	FILT_ERR_FIELD_NOT_FOUND,
	FILT_ERR_ILLEGAL_FIELD_OP,
	FILT_ERR_ILLEGAL_INTVAL,
	FILT_ERR_BAD_SUBSYS_FILTER,
	FILT_ERR_TOO_MANY_PREDS,
	FILT_ERR_MISSING_FIELD,
	FILT_ERR_INVALID_FILTER,
	FILT_ERR_IP_FIELD_ONLY,
	FILT_ERR_ILLEGAL_NOT_OP,
};

/* Human-readable messages, indexed by the FILT_ERR_* values above. */
static char *err_text[] = {
	"No error",
	"Invalid operator",
	"Unbalanced parens",
	"Too many operands",
	"Operand too long",
	"Field not found",
	"Illegal operation for field type",
	"Illegal integer value",
	"Couldn't find or set field in one of a subsystem's events",
	"Too many terms in predicate expression",
	"Missing field name and/or value",
	"Meaningless filter expression",
	"Only 'ip' field is supported for function trace",
	"Illegal use of '!'",
};
109
/* One entry on the operator stack used during infix->postfix conversion. */
struct opstack_op {
	enum filter_op_ids op;
	struct list_head list;
};

/*
 * One element of the postfix (RPN) expression: either an operator
 * (op != OP_NONE) or an operand string (op == OP_NONE, operand set).
 */
struct postfix_elt {
	enum filter_op_ids op;
	char *operand;
	struct list_head list;
};

/* Scratch state for parsing a single filter expression. */
struct filter_parse_state {
	struct filter_op *ops;		/* operator table (filter_ops) */
	struct list_head opstack;	/* operator stack (precedence based) */
	struct list_head postfix;	/* resulting postfix expression */
	int lasterr;			/* FILT_ERR_* of the last error */
	int lasterr_pos;		/* position of that error in the input */

	struct {
		char *string;		/* the raw infix expression */
		unsigned int cnt;	/* characters still to consume */
		unsigned int tail;	/* index of next character to read */
	} infix;

	struct {
		char string[MAX_FILTER_STR_VAL];	/* operand being built */
		int pos;
		unsigned int tail;	/* next free slot in string[] */
	} operand;
};

/* Fixed-capacity stack of predicate pointers used while building the tree. */
struct pred_stack {
	struct filter_pred **preds;
	int index;
};
145
/* If not of not match is equal to not of not, then it is a match */
/*
 * Generate the LT/LE/GT/GE/BAND predicate functions for one integer
 * type, plus a table (pred_funcs_<type>) holding them in the same
 * order as the OP_LT..OP_BAND operator ids, so a function can be
 * selected with pred_funcs_<type>[op - PRED_FUNC_START].
 */
#define DEFINE_COMPARISON_PRED(type)					\
static int filter_pred_LT_##type(struct filter_pred *pred, void *event)	\
{									\
	type *addr = (type *)(event + pred->offset);			\
	type val = (type)pred->val;					\
	int match = (*addr < val);					\
	return !!match == !pred->not;					\
}									\
static int filter_pred_LE_##type(struct filter_pred *pred, void *event)	\
{									\
	type *addr = (type *)(event + pred->offset);			\
	type val = (type)pred->val;					\
	int match = (*addr <= val);					\
	return !!match == !pred->not;					\
}									\
static int filter_pred_GT_##type(struct filter_pred *pred, void *event)	\
{									\
	type *addr = (type *)(event + pred->offset);			\
	type val = (type)pred->val;					\
	int match = (*addr > val);					\
	return !!match == !pred->not;					\
}									\
static int filter_pred_GE_##type(struct filter_pred *pred, void *event)	\
{									\
	type *addr = (type *)(event + pred->offset);			\
	type val = (type)pred->val;					\
	int match = (*addr >= val);					\
	return !!match == !pred->not;					\
}									\
static int filter_pred_BAND_##type(struct filter_pred *pred, void *event) \
{									\
	type *addr = (type *)(event + pred->offset);			\
	type val = (type)pred->val;					\
	int match = !!(*addr & val);					\
	return match == !pred->not;					\
}									\
static const filter_pred_fn_t pred_funcs_##type[] = {			\
	filter_pred_LT_##type,						\
	filter_pred_LE_##type,						\
	filter_pred_GT_##type,						\
	filter_pred_GE_##type,						\
	filter_pred_BAND_##type,					\
};

/* First operator id covered by the pred_funcs_* tables above */
#define PRED_FUNC_START			OP_LT

/*
 * Generate an equality predicate for one field size.  It serves both
 * OP_EQ and OP_NE: the latter is implemented by setting pred->not in
 * init_pred(), which the XOR below folds into the result.
 */
#define DEFINE_EQUALITY_PRED(size)					\
static int filter_pred_##size(struct filter_pred *pred, void *event)	\
{									\
	u##size *addr = (u##size *)(event + pred->offset);		\
	u##size val = (u##size)pred->val;				\
	int match;							\
									\
	match = (val == *addr) ^ pred->not;				\
									\
	return match;							\
}

/* Comparison predicates for every signed/unsigned fixed-width type */
DEFINE_COMPARISON_PRED(s64);
DEFINE_COMPARISON_PRED(u64);
DEFINE_COMPARISON_PRED(s32);
DEFINE_COMPARISON_PRED(u32);
DEFINE_COMPARISON_PRED(s16);
DEFINE_COMPARISON_PRED(u16);
DEFINE_COMPARISON_PRED(s8);
DEFINE_COMPARISON_PRED(u8);

/* Equality predicates for every field size */
DEFINE_EQUALITY_PRED(64);
DEFINE_EQUALITY_PRED(32);
DEFINE_EQUALITY_PRED(16);
DEFINE_EQUALITY_PRED(8);
218
219 /* Filter predicate for fixed sized arrays of characters */
220 static int filter_pred_string(struct filter_pred *pred, void *event)
221 {
222 char *addr = (char *)(event + pred->offset);
223 int cmp, match;
224
225 cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len);
226
227 match = cmp ^ pred->not;
228
229 return match;
230 }
231
232 /* Filter predicate for char * pointers */
233 static int filter_pred_pchar(struct filter_pred *pred, void *event)
234 {
235 char **addr = (char **)(event + pred->offset);
236 int cmp, match;
237 int len = strlen(*addr) + 1; /* including tailing '\0' */
238
239 cmp = pred->regex.match(*addr, &pred->regex, len);
240
241 match = cmp ^ pred->not;
242
243 return match;
244 }
245
246 /*
247 * Filter predicate for dynamic sized arrays of characters.
248 * These are implemented through a list of strings at the end
249 * of the entry.
250 * Also each of these strings have a field in the entry which
251 * contains its offset from the beginning of the entry.
252 * We have then first to get this field, dereference it
253 * and add it to the address of the entry, and at last we have
254 * the address of the string.
255 */
256 static int filter_pred_strloc(struct filter_pred *pred, void *event)
257 {
258 u32 str_item = *(u32 *)(event + pred->offset);
259 int str_loc = str_item & 0xffff;
260 int str_len = str_item >> 16;
261 char *addr = (char *)(event + str_loc);
262 int cmp, match;
263
264 cmp = pred->regex.match(addr, &pred->regex, str_len);
265
266 match = cmp ^ pred->not;
267
268 return match;
269 }
270
/* Filter predicate for CPUs. */
static int filter_pred_cpu(struct filter_pred *pred, void *event)
{
	int cpu, cmp;
	int match = 0;

	/* Compares the CPU the event fires on, not a field of the event */
	cpu = raw_smp_processor_id();
	cmp = pred->val;

	switch (pred->op) {
	case OP_EQ:
		match = cpu == cmp;
		break;
	case OP_LT:
		match = cpu < cmp;
		break;
	case OP_LE:
		match = cpu <= cmp;
		break;
	case OP_GT:
		match = cpu > cmp;
		break;
	case OP_GE:
		match = cpu >= cmp;
		break;
	default:
		/* OP_NE is handled via pred->not, flipped in init_pred() */
		break;
	}

	return !!match == !pred->not;
}
302
303 /* Filter predicate for COMM. */
304 static int filter_pred_comm(struct filter_pred *pred, void *event)
305 {
306 int cmp, match;
307
308 cmp = pred->regex.match(current->comm, &pred->regex,
309 pred->regex.field_len);
310 match = cmp ^ pred->not;
311
312 return match;
313 }
314
/*
 * Placeholder predicate that never matches; installed on every fresh
 * slot by __alloc_preds() so an unset pred->fn is never called.
 */
static int filter_pred_none(struct filter_pred *pred, void *event)
{
	return 0;
}
319
320 /*
321 * regex_match_foo - Basic regex callbacks
322 *
323 * @str: the string to be searched
324 * @r: the regex structure containing the pattern string
325 * @len: the length of the string to be searched (including '\0')
326 *
327 * Note:
328 * - @str might not be NULL-terminated if it's of type DYN_STRING
329 * or STATIC_STRING
330 */
331
332 static int regex_match_full(char *str, struct regex *r, int len)
333 {
334 if (strncmp(str, r->pattern, len) == 0)
335 return 1;
336 return 0;
337 }
338
339 static int regex_match_front(char *str, struct regex *r, int len)
340 {
341 if (strncmp(str, r->pattern, r->len) == 0)
342 return 1;
343 return 0;
344 }
345
346 static int regex_match_middle(char *str, struct regex *r, int len)
347 {
348 if (strnstr(str, r->pattern, len))
349 return 1;
350 return 0;
351 }
352
353 static int regex_match_end(char *str, struct regex *r, int len)
354 {
355 int strlen = len - 1;
356
357 if (strlen >= r->len &&
358 memcmp(str + strlen - r->len, r->pattern, r->len) == 0)
359 return 1;
360 return 0;
361 }
362
363 static int regex_match_glob(char *str, struct regex *r, int len __maybe_unused)
364 {
365 if (glob_match(r->pattern, str))
366 return 1;
367 return 0;
368 }
/**
 * filter_parse_regex - parse a basic regex
 * @buff:   the raw regex
 * @len:    length of the regex
 * @search: will point to the beginning of the string to compare
 * @not:    tell whether the match will have to be inverted
 *
 * This passes in a buffer containing a regex and this function will
 * set search to point to the search part of the buffer and
 * return the type of search it is (see enum above).
 * This does modify buff.
 *
 * Returns enum type.
 *  search returns the pointer to use for comparison.
 *  not returns 1 if buff started with a '!'
 *     0 otherwise.
 */
enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
{
	int type = MATCH_FULL;
	int i;

	/* a leading '!' inverts the match and is stripped from the pattern */
	if (buff[0] == '!') {
		*not = 1;
		buff++;
		len--;
	} else
		*not = 0;

	*search = buff;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				/* leading '*': match against the end */
				*search = buff + 1;
				type = MATCH_END_ONLY;
			} else if (i == len - 1) {
				/* trailing '*': front (or middle) match */
				if (type == MATCH_END_ONLY)
					type = MATCH_MIDDLE_ONLY;
				else
					type = MATCH_FRONT_ONLY;
				buff[i] = 0;
				break;
			} else { /* pattern continues, use full glob */
				type = MATCH_GLOB;
				break;
			}
		} else if (strchr("[?\\", buff[i])) {
			/* any other glob metacharacter: fall back to glob */
			type = MATCH_GLOB;
			break;
		}
	}

	return type;
}
424
/*
 * Decide how the predicate's string operand will be matched.  For
 * OP_GLOB the pattern is parsed for leading/trailing '*' (or full glob
 * syntax) and may be rewritten in place; every other string operator
 * uses a full compare.  A leading '!' in the pattern inverts the
 * predicate (folded into pred->not at the end).
 */
static void filter_build_regex(struct filter_pred *pred)
{
	struct regex *r = &pred->regex;
	char *search;
	enum regex_type type = MATCH_FULL;
	int not = 0;

	if (pred->op == OP_GLOB) {
		type = filter_parse_regex(r->pattern, r->len, &search, &not);
		/* keep only the part of the pattern to compare against */
		r->len = strlen(search);
		memmove(r->pattern, search, r->len+1);
	}

	switch (type) {
	case MATCH_FULL:
		r->match = regex_match_full;
		break;
	case MATCH_FRONT_ONLY:
		r->match = regex_match_front;
		break;
	case MATCH_MIDDLE_ONLY:
		r->match = regex_match_middle;
		break;
	case MATCH_END_ONLY:
		r->match = regex_match_end;
		break;
	case MATCH_GLOB:
		r->match = regex_match_glob;
		break;
	}

	pred->not ^= not;
}
458
/* Direction of travel while iteratively walking the predicate tree. */
enum move_type {
	MOVE_DOWN,		/* descending into a child */
	MOVE_UP_FROM_LEFT,	/* done with a left subtree */
	MOVE_UP_FROM_RIGHT	/* done with a right subtree */
};

/*
 * Step from @pred to its parent in the flat preds[] array, reporting
 * via @move which side we come up from.  The parent index lives in
 * pred->parent, with FILTER_PRED_IS_RIGHT flagging whether @pred was
 * the right child.  (@index is currently unused.)
 */
static struct filter_pred *
get_pred_parent(struct filter_pred *pred, struct filter_pred *preds,
		int index, enum move_type *move)
{
	if (pred->parent & FILTER_PRED_IS_RIGHT)
		*move = MOVE_UP_FROM_RIGHT;
	else
		*move = MOVE_UP_FROM_LEFT;
	pred = &preds[pred->parent & ~FILTER_PRED_IS_RIGHT];

	return pred;
}

/* Values a filter_pred_walkcb_t may return to steer the tree walk. */
enum walk_return {
	WALK_PRED_ABORT,	/* stop the walk, return the callback's *err */
	WALK_PRED_PARENT,	/* skip this subtree, go back to the parent */
	WALK_PRED_DEFAULT,	/* continue normally */
};

typedef int (*filter_pred_walkcb_t) (enum move_type move,
				     struct filter_pred *pred,
				     int *err, void *data);
487
/*
 * Iteratively walk the predicate tree rooted at @root (no recursion,
 * so the kernel stack stays bounded), invoking @cb at each visit.
 * The callback sees the current direction of travel (enum move_type)
 * and may steer the walk via its return value (enum walk_return).
 * Returns 0 on a complete walk, or the callback's *err on abort.
 */
static int walk_pred_tree(struct filter_pred *preds,
			  struct filter_pred *root,
			  filter_pred_walkcb_t cb, void *data)
{
	struct filter_pred *pred = root;
	enum move_type move = MOVE_DOWN;
	int done = 0;

	if (!preds)
		return -EINVAL;

	do {
		int err = 0, ret;

		ret = cb(move, pred, &err, data);
		if (ret == WALK_PRED_ABORT)
			return err;
		if (ret == WALK_PRED_PARENT)
			goto get_parent;

		switch (move) {
		case MOVE_DOWN:
			/* leaves have an invalid left index */
			if (pred->left != FILTER_PRED_INVALID) {
				pred = &preds[pred->left];
				continue;
			}
			goto get_parent;
		case MOVE_UP_FROM_LEFT:
			/* left side done, descend into the right child */
			pred = &preds[pred->right];
			move = MOVE_DOWN;
			continue;
		case MOVE_UP_FROM_RIGHT:
 get_parent:
			if (pred == root)
				break;
			pred = get_pred_parent(pred, preds,
					       pred->parent,
					       &move);
			continue;
		}
		done = 1;
	} while (!done);

	/* We are fine. */
	return 0;
}
534
/*
 * A series of AND or ORs where found together. Instead of
 * climbing up and down the tree branches, an array of the
 * ops were made in order of checks. We can just move across
 * the array and short circuit if needed.
 */
static int process_ops(struct filter_pred *preds,
		       struct filter_pred *op, void *rec)
{
	struct filter_pred *pred;
	int match = 0;
	int type;
	int i;

	/*
	 * Micro-optimization: We set type to true if op
	 * is an OR and false otherwise (AND). Then we
	 * just need to test if the match is equal to
	 * the type, and if it is, we can short circuit the
	 * rest of the checks:
	 *
	 *  if ((match && op->op == OP_OR) ||
	 *      (!match && op->op == OP_AND))
	 *	  return match;
	 */
	type = op->op == OP_OR;

	/* op->val holds the number of folded children in op->ops[] */
	for (i = 0; i < op->val; i++) {
		pred = &preds[op->ops[i]];
		if (!WARN_ON_ONCE(!pred->fn))
			match = pred->fn(pred, rec);
		/* short circuit: OR with a hit, or AND with a miss */
		if (!!match == type)
			break;
	}
	/* If not of not match is equal to not of not, then it is a match */
	return !!match == !op->not;
}
572
/* Context threaded through walk_pred_tree() by filter_match_preds(). */
struct filter_match_preds_data {
	struct filter_pred *preds;	/* flat predicate array of the filter */
	int match;			/* running result of the evaluation */
	void *rec;			/* the event record being tested */
};

/*
 * walk_pred_tree() callback that evaluates the filter against d->rec.
 * Leaves (and folded op arrays) are evaluated directly; AND/OR nodes
 * short-circuit by skipping the right subtree when the left subtree
 * already decides the result.
 */
static int filter_match_preds_cb(enum move_type move, struct filter_pred *pred,
				 int *err, void *data)
{
	struct filter_match_preds_data *d = data;

	*err = 0;
	switch (move) {
	case MOVE_DOWN:
		/* only AND and OR have children */
		if (pred->left != FILTER_PRED_INVALID) {
			/* If ops is set, then it was folded. */
			if (!pred->ops)
				return WALK_PRED_DEFAULT;
			/* We can treat folded ops as a leaf node */
			d->match = process_ops(d->preds, pred, d->rec);
		} else {
			if (!WARN_ON_ONCE(!pred->fn))
				d->match = pred->fn(pred, d->rec);
		}

		return WALK_PRED_PARENT;
	case MOVE_UP_FROM_LEFT:
		/*
		 * Check for short circuits.
		 *
		 * Optimization: !!match == (pred->op == OP_OR)
		 *   is the same as:
		 * if ((match && pred->op == OP_OR) ||
		 *     (!match && pred->op == OP_AND))
		 */
		if (!!d->match == (pred->op == OP_OR))
			return WALK_PRED_PARENT;
		break;
	case MOVE_UP_FROM_RIGHT:
		break;
	}

	return WALK_PRED_DEFAULT;
}
618
/* return 1 if event matches, 0 otherwise (discard) */
int filter_match_preds(struct event_filter *filter, void *rec)
{
	struct filter_pred *preds;
	struct filter_pred *root;
	struct filter_match_preds_data data = {
		/* match is currently meaningless */
		.match = -1,
		.rec   = rec,
	};
	int n_preds, ret;

	/* no filter is considered a match */
	if (!filter)
		return 1;

	n_preds = filter->n_preds;
	if (!n_preds)
		return 1;

	/*
	 * n_preds, root and filter->preds are protected with preemption
	 * disabled (hence the _sched RCU dereferences below).
	 */
	root = rcu_dereference_sched(filter->root);
	if (!root)
		return 1;

	data.preds = preds = rcu_dereference_sched(filter->preds);
	ret = walk_pred_tree(preds, root, filter_match_preds_cb, &data);
	WARN_ON(ret);
	return data.match;
}
EXPORT_SYMBOL_GPL(filter_match_preds);
652
/* Record the error code and input position for later reporting. */
static void parse_error(struct filter_parse_state *ps, int err, int pos)
{
	ps->lasterr = err;
	ps->lasterr_pos = pos;
}

/* Free a filter's saved expression string, if any. */
static void remove_filter_string(struct event_filter *filter)
{
	if (!filter)
		return;

	kfree(filter->filter_string);
	filter->filter_string = NULL;
}
667
668 static int replace_filter_string(struct event_filter *filter,
669 char *filter_string)
670 {
671 kfree(filter->filter_string);
672 filter->filter_string = kstrdup(filter_string, GFP_KERNEL);
673 if (!filter->filter_string)
674 return -ENOMEM;
675
676 return 0;
677 }
678
679 static int append_filter_string(struct event_filter *filter,
680 char *string)
681 {
682 int newlen;
683 char *new_filter_string;
684
685 BUG_ON(!filter->filter_string);
686 newlen = strlen(filter->filter_string) + strlen(string) + 1;
687 new_filter_string = kmalloc(newlen, GFP_KERNEL);
688 if (!new_filter_string)
689 return -ENOMEM;
690
691 strcpy(new_filter_string, filter->filter_string);
692 strcat(new_filter_string, string);
693 kfree(filter->filter_string);
694 filter->filter_string = new_filter_string;
695
696 return 0;
697 }
698
699 static void append_filter_err(struct filter_parse_state *ps,
700 struct event_filter *filter)
701 {
702 int pos = ps->lasterr_pos;
703 char *buf, *pbuf;
704
705 buf = (char *)__get_free_page(GFP_TEMPORARY);
706 if (!buf)
707 return;
708
709 append_filter_string(filter, "\n");
710 memset(buf, ' ', PAGE_SIZE);
711 if (pos > PAGE_SIZE - 128)
712 pos = 0;
713 buf[pos] = '^';
714 pbuf = &buf[pos] + 1;
715
716 sprintf(pbuf, "\nparse_error: %s\n", err_text[ps->lasterr]);
717 append_filter_string(filter, buf);
718 free_page((unsigned long) buf);
719 }
720
/* Return the filter attached to @file (may be NULL). */
static inline struct event_filter *event_filter(struct trace_event_file *file)
{
	return file->filter;
}

/* caller must hold event_mutex */
void print_event_filter(struct trace_event_file *file, struct trace_seq *s)
{
	struct event_filter *filter = event_filter(file);

	if (filter && filter->filter_string)
		trace_seq_printf(s, "%s\n", filter->filter_string);
	else
		trace_seq_puts(s, "none\n");
}

/* Print a subsystem-wide filter; takes event_mutex itself. */
void print_subsystem_event_filter(struct event_subsystem *system,
				  struct trace_seq *s)
{
	struct event_filter *filter;

	mutex_lock(&event_mutex);
	filter = system->filter;
	if (filter && filter->filter_string)
		trace_seq_printf(s, "%s\n", filter->filter_string);
	else
		trace_seq_puts(s, DEFAULT_SYS_FILTER_MESSAGE "\n");
	mutex_unlock(&event_mutex);
}
750
/*
 * Allocate a pred_stack able to hold @n_preds entries.  One extra
 * zeroed slot sits at the top so __pop_pred_stack() can detect an
 * empty stack by finding a NULL entry.
 */
static int __alloc_pred_stack(struct pred_stack *stack, int n_preds)
{
	stack->preds = kcalloc(n_preds + 1, sizeof(*stack->preds), GFP_KERNEL);
	if (!stack->preds)
		return -ENOMEM;
	stack->index = n_preds;
	return 0;
}

static void __free_pred_stack(struct pred_stack *stack)
{
	kfree(stack->preds);
	stack->index = 0;
}

/* Push @pred; the stack grows downward from index n_preds toward 0. */
static int __push_pred_stack(struct pred_stack *stack,
			     struct filter_pred *pred)
{
	int index = stack->index;

	if (WARN_ON(index == 0))
		return -ENOSPC;

	stack->preds[--index] = pred;
	stack->index = index;
	return 0;
}

/* Pop the top predicate, or return NULL when the stack is empty. */
static struct filter_pred *
__pop_pred_stack(struct pred_stack *stack)
{
	struct filter_pred *pred;
	int index = stack->index;

	pred = stack->preds[index++];
	if (!pred)
		return NULL;

	stack->index = index;
	return pred;
}
792
/*
 * Copy @src into slot @idx of the filter's predicate array and link it
 * into the tree: an AND/OR node pops its two children off @stack, a
 * leaf is marked by an invalid left index.  The new node is pushed
 * back on @stack so it can become a child of a later operator.  The
 * FILTER_PRED_FOLD bit carried in ->index marks subtrees that can
 * later be collapsed into a flat ops array (see process_ops()).
 */
static int filter_set_pred(struct event_filter *filter,
			   int idx,
			   struct pred_stack *stack,
			   struct filter_pred *src)
{
	struct filter_pred *dest = &filter->preds[idx];
	struct filter_pred *left;
	struct filter_pred *right;

	*dest = *src;
	dest->index = idx;

	if (dest->op == OP_OR || dest->op == OP_AND) {
		right = __pop_pred_stack(stack);
		left = __pop_pred_stack(stack);
		if (!left || !right)
			return -EINVAL;
		/*
		 * If both children can be folded
		 * and they are the same op as this op or a leaf,
		 * then this op can be folded.
		 */
		if (left->index & FILTER_PRED_FOLD &&
		    ((left->op == dest->op && !left->not) ||
		     left->left == FILTER_PRED_INVALID) &&
		    right->index & FILTER_PRED_FOLD &&
		    ((right->op == dest->op && !right->not) ||
		     right->left == FILTER_PRED_INVALID))
			dest->index |= FILTER_PRED_FOLD;

		/* strip the fold bit from child links; flag the right child */
		dest->left = left->index & ~FILTER_PRED_FOLD;
		dest->right = right->index & ~FILTER_PRED_FOLD;
		left->parent = dest->index & ~FILTER_PRED_FOLD;
		right->parent = dest->index | FILTER_PRED_IS_RIGHT;
	} else {
		/*
		 * Make dest->left invalid to be used as a quick
		 * way to know this is a leaf node.
		 */
		dest->left = FILTER_PRED_INVALID;

		/* All leafs allow folding the parent ops. */
		dest->index |= FILTER_PRED_FOLD;
	}

	return __push_pred_stack(stack, dest);
}
840
841 static void __free_preds(struct event_filter *filter)
842 {
843 int i;
844
845 if (filter->preds) {
846 for (i = 0; i < filter->n_preds; i++)
847 kfree(filter->preds[i].ops);
848 kfree(filter->preds);
849 filter->preds = NULL;
850 }
851 filter->a_preds = 0;
852 filter->n_preds = 0;
853 }
854
/*
 * Clear the FILTERED flag on @file; if it was set, drop the matching
 * buffered-event reference.
 */
static void filter_disable(struct trace_event_file *file)
{
	unsigned long old_flags = file->flags;

	file->flags &= ~EVENT_FILE_FL_FILTERED;

	if (old_flags != file->flags)
		trace_buffered_event_disable();
}

/* Free a filter, its predicate array and its saved expression string. */
static void __free_filter(struct event_filter *filter)
{
	if (!filter)
		return;

	__free_preds(filter);
	kfree(filter->filter_string);
	kfree(filter);
}

void free_event_filter(struct event_filter *filter)
{
	__free_filter(filter);
}
879
880 static struct event_filter *__alloc_filter(void)
881 {
882 struct event_filter *filter;
883
884 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
885 return filter;
886 }
887
888 static int __alloc_preds(struct event_filter *filter, int n_preds)
889 {
890 struct filter_pred *pred;
891 int i;
892
893 if (filter->preds)
894 __free_preds(filter);
895
896 filter->preds = kcalloc(n_preds, sizeof(*filter->preds), GFP_KERNEL);
897
898 if (!filter->preds)
899 return -ENOMEM;
900
901 filter->a_preds = n_preds;
902 filter->n_preds = 0;
903
904 for (i = 0; i < n_preds; i++) {
905 pred = &filter->preds[i];
906 pred->fn = filter_pred_none;
907 }
908
909 return 0;
910 }
911
912 static inline void __remove_filter(struct trace_event_file *file)
913 {
914 filter_disable(file);
915 remove_filter_string(file->filter);
916 }
917
918 static void filter_free_subsystem_preds(struct trace_subsystem_dir *dir,
919 struct trace_array *tr)
920 {
921 struct trace_event_file *file;
922
923 list_for_each_entry(file, &tr->events, list) {
924 if (file->system != dir)
925 continue;
926 __remove_filter(file);
927 }
928 }
929
930 static inline void __free_subsystem_filter(struct trace_event_file *file)
931 {
932 __free_filter(file->filter);
933 file->filter = NULL;
934 }
935
936 static void filter_free_subsystem_filters(struct trace_subsystem_dir *dir,
937 struct trace_array *tr)
938 {
939 struct trace_event_file *file;
940
941 list_for_each_entry(file, &tr->events, list) {
942 if (file->system != dir)
943 continue;
944 __free_subsystem_filter(file);
945 }
946 }
947
/*
 * Append @pred to the filter's predicate array (wiring it into the
 * tree via filter_set_pred()).  Fails if the array is already full.
 */
static int filter_add_pred(struct filter_parse_state *ps,
			   struct event_filter *filter,
			   struct filter_pred *pred,
			   struct pred_stack *stack)
{
	int err;

	if (WARN_ON(filter->n_preds == filter->a_preds)) {
		parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
		return -ENOSPC;
	}

	err = filter_set_pred(filter, filter->n_preds, stack, pred);
	if (err)
		return err;

	filter->n_preds++;

	return 0;
}
968
969 int filter_assign_type(const char *type)
970 {
971 if (strstr(type, "__data_loc") && strstr(type, "char"))
972 return FILTER_DYN_STRING;
973
974 if (strchr(type, '[') && strstr(type, "char"))
975 return FILTER_STATIC_STRING;
976
977 return FILTER_OTHER;
978 }
979
980 static bool is_legal_op(struct ftrace_event_field *field, enum filter_op_ids op)
981 {
982 if (is_string_field(field) &&
983 (op != OP_EQ && op != OP_NE && op != OP_GLOB))
984 return false;
985 if (!is_string_field(field) && op == OP_GLOB)
986 return false;
987
988 return true;
989 }
990
/*
 * Pick the predicate function for an integer field: equality tests use
 * the size-specific filter_pred_<size>(), all other operators index
 * the pred_funcs_<type> tables built by DEFINE_COMPARISON_PRED.
 * Returns NULL for an unsupported size/operator combination.
 */
static filter_pred_fn_t select_comparison_fn(enum filter_op_ids op,
					     int field_size, int field_is_signed)
{
	filter_pred_fn_t fn = NULL;

	switch (field_size) {
	case 8:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_64;
		else if (field_is_signed)
			fn = pred_funcs_s64[op - PRED_FUNC_START];
		else
			fn = pred_funcs_u64[op - PRED_FUNC_START];
		break;
	case 4:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_32;
		else if (field_is_signed)
			fn = pred_funcs_s32[op - PRED_FUNC_START];
		else
			fn = pred_funcs_u32[op - PRED_FUNC_START];
		break;
	case 2:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_16;
		else if (field_is_signed)
			fn = pred_funcs_s16[op - PRED_FUNC_START];
		else
			fn = pred_funcs_u16[op - PRED_FUNC_START];
		break;
	case 1:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_8;
		else if (field_is_signed)
			fn = pred_funcs_s8[op - PRED_FUNC_START];
		else
			fn = pred_funcs_u8[op - PRED_FUNC_START];
		break;
	}

	return fn;
}
1033
/*
 * Finish setting up a leaf predicate for @field: validate the
 * operator, build the string matcher or parse the integer operand,
 * and select the predicate function.  Returns 0 on success or
 * -EINVAL with a parse error recorded in @ps.
 */
static int init_pred(struct filter_parse_state *ps,
		     struct ftrace_event_field *field,
		     struct filter_pred *pred)

{
	filter_pred_fn_t fn = filter_pred_none;
	unsigned long long val;
	int ret;

	pred->offset = field->offset;

	if (!is_legal_op(field, pred->op)) {
		parse_error(ps, FILT_ERR_ILLEGAL_FIELD_OP, 0);
		return -EINVAL;
	}

	if (field->filter_type == FILTER_COMM) {
		filter_build_regex(pred);
		fn = filter_pred_comm;
		pred->regex.field_len = TASK_COMM_LEN;
	} else if (is_string_field(field)) {
		filter_build_regex(pred);

		/* pick the matcher for static, dynamic or pointer strings */
		if (field->filter_type == FILTER_STATIC_STRING) {
			fn = filter_pred_string;
			pred->regex.field_len = field->size;
		} else if (field->filter_type == FILTER_DYN_STRING)
			fn = filter_pred_strloc;
		else
			fn = filter_pred_pchar;
	} else if (is_function_field(field)) {
		if (strcmp(field->name, "ip")) {
			parse_error(ps, FILT_ERR_IP_FIELD_ONLY, 0);
			return -EINVAL;
		}
	} else {
		/* numeric field: parse the operand as a (un)signed integer */
		if (field->is_signed)
			ret = kstrtoll(pred->regex.pattern, 0, &val);
		else
			ret = kstrtoull(pred->regex.pattern, 0, &val);
		if (ret) {
			parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
			return -EINVAL;
		}
		pred->val = val;

		if (field->filter_type == FILTER_CPU)
			fn = filter_pred_cpu;
		else
			fn = select_comparison_fn(pred->op, field->size,
						  field->is_signed);
		if (!fn) {
			parse_error(ps, FILT_ERR_INVALID_OP, 0);
			return -EINVAL;
		}
	}

	/* "!=" is implemented as "==" with the result inverted */
	if (pred->op == OP_NE)
		pred->not ^= 1;

	pred->fn = fn;
	return 0;
}
1097
1098 static void parse_init(struct filter_parse_state *ps,
1099 struct filter_op *ops,
1100 char *infix_string)
1101 {
1102 memset(ps, '\0', sizeof(*ps));
1103
1104 ps->infix.string = infix_string;
1105 ps->infix.cnt = strlen(infix_string);
1106 ps->ops = ops;
1107
1108 INIT_LIST_HEAD(&ps->opstack);
1109 INIT_LIST_HEAD(&ps->postfix);
1110 }
1111
/* Consume and return the next infix character (0 when exhausted). */
static char infix_next(struct filter_parse_state *ps)
{
	if (!ps->infix.cnt)
		return 0;

	ps->infix.cnt--;

	return ps->infix.string[ps->infix.tail++];
}

/* Look at the next infix character without consuming it (0 at end). */
static char infix_peek(struct filter_parse_state *ps)
{
	if (ps->infix.tail == strlen(ps->infix.string))
		return 0;

	return ps->infix.string[ps->infix.tail];
}

/* Consume one character previously seen via infix_peek(). */
static void infix_advance(struct filter_parse_state *ps)
{
	if (!ps->infix.cnt)
		return;

	ps->infix.cnt--;
	ps->infix.tail++;
}

/* True if operator @a binds less tightly than operator @b. */
static inline int is_precedence_lower(struct filter_parse_state *ps,
				      int a, int b)
{
	return ps->ops[a].precedence < ps->ops[b].precedence;
}
1144
/*
 * True if @c can start an operator token.  The op table is terminated
 * by the entry whose string is "OP_NONE".
 */
static inline int is_op_char(struct filter_parse_state *ps, char c)
{
	int i;

	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
		if (ps->ops[i].string[0] == c)
			return 1;
	}

	return 0;
}

/*
 * Resolve the operator beginning with @firstc.  Two-character tokens
 * ("&&", "<=", ...) are tried first — consuming the peeked character
 * on a hit — before falling back to the one-character form.  Returns
 * OP_NONE when nothing matches.
 */
static int infix_get_op(struct filter_parse_state *ps, char firstc)
{
	char nextc = infix_peek(ps);
	char opstr[3];
	int i;

	opstr[0] = firstc;
	opstr[1] = nextc;
	opstr[2] = '\0';

	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
		if (!strcmp(opstr, ps->ops[i].string)) {
			infix_advance(ps);
			return ps->ops[i].id;
		}
	}

	opstr[1] = '\0';

	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
		if (!strcmp(opstr, ps->ops[i].string))
			return ps->ops[i].id;
	}

	return OP_NONE;
}
1183
/* Reset the operand accumulation buffer. */
static inline void clear_operand_string(struct filter_parse_state *ps)
{
	memset(ps->operand.string, '\0', MAX_FILTER_STR_VAL);
	ps->operand.tail = 0;
}

/* Append one character to the operand buffer; -EINVAL when full. */
static inline int append_operand_char(struct filter_parse_state *ps, char c)
{
	if (ps->operand.tail == MAX_FILTER_STR_VAL - 1)
		return -EINVAL;

	ps->operand.string[ps->operand.tail++] = c;

	return 0;
}
1199
1200 static int filter_opstack_push(struct filter_parse_state *ps,
1201 enum filter_op_ids op)
1202 {
1203 struct opstack_op *opstack_op;
1204
1205 opstack_op = kmalloc(sizeof(*opstack_op), GFP_KERNEL);
1206 if (!opstack_op)
1207 return -ENOMEM;
1208
1209 opstack_op->op = op;
1210 list_add(&opstack_op->list, &ps->opstack);
1211
1212 return 0;
1213 }
1214
static int filter_opstack_empty(struct filter_parse_state *ps)
{
	return list_empty(&ps->opstack);
}

/* Return the operator on top of the stack without removing it. */
static int filter_opstack_top(struct filter_parse_state *ps)
{
	struct opstack_op *opstack_op;

	if (filter_opstack_empty(ps))
		return OP_NONE;

	opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);

	return opstack_op->op;
}

/* Pop and return the top operator, or OP_NONE if the stack is empty. */
static int filter_opstack_pop(struct filter_parse_state *ps)
{
	struct opstack_op *opstack_op;
	enum filter_op_ids op;

	if (filter_opstack_empty(ps))
		return OP_NONE;

	opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
	op = opstack_op->op;
	list_del(&opstack_op->list);

	kfree(opstack_op);

	return op;
}

/* Pop (and free) everything left on the operator stack. */
static void filter_opstack_clear(struct filter_parse_state *ps)
{
	while (!filter_opstack_empty(ps))
		filter_opstack_pop(ps);
}

/* The operand currently being accumulated. */
static char *curr_operand(struct filter_parse_state *ps)
{
	return ps->operand.string;
}
1259
/* Append a copy of @operand to the postfix expression. */
static int postfix_append_operand(struct filter_parse_state *ps, char *operand)
{
	struct postfix_elt *elt;

	elt = kmalloc(sizeof(*elt), GFP_KERNEL);
	if (!elt)
		return -ENOMEM;

	/* OP_NONE marks the element as an operand, not an operator */
	elt->op = OP_NONE;
	elt->operand = kstrdup(operand, GFP_KERNEL);
	if (!elt->operand) {
		kfree(elt);
		return -ENOMEM;
	}

	list_add_tail(&elt->list, &ps->postfix);

	return 0;
}

/* Append operator @op to the postfix expression. */
static int postfix_append_op(struct filter_parse_state *ps, enum filter_op_ids op)
{
	struct postfix_elt *elt;

	elt = kmalloc(sizeof(*elt), GFP_KERNEL);
	if (!elt)
		return -ENOMEM;

	elt->op = op;
	elt->operand = NULL;

	list_add_tail(&elt->list, &ps->postfix);

	return 0;
}

/* Free every element of the postfix expression. */
static void postfix_clear(struct filter_parse_state *ps)
{
	struct postfix_elt *elt;

	while (!list_empty(&ps->postfix)) {
		elt = list_first_entry(&ps->postfix, struct postfix_elt, list);
		list_del(&elt->list);
		kfree(elt->operand);
		kfree(elt);
	}
}
1307
/*
 * Shunting-yard pass: convert the infix filter string held in @ps into
 * a postfix list of operands and operators (ps->postfix), using
 * ps->opstack as the pending-operator stack.
 *
 * Returns 0 on success, -EINVAL on a malformed expression (invalid
 * operator, unbalanced parentheses, or an over-long operand); the
 * specific problem is recorded via parse_error() for later reporting.
 *
 * NOTE(review): postfix_append_operand()/postfix_append_op()/
 * filter_opstack_push() can fail with -ENOMEM but their return values
 * are ignored here - a failed append silently drops part of the
 * expression; confirm whether that is acceptable.
 */
static int filter_parse(struct filter_parse_state *ps)
{
	enum filter_op_ids op, top_op;
	int in_string = 0;
	char ch;

	while ((ch = infix_next(ps))) {
		/* Double quotes toggle string mode; everything inside is
		 * treated as literal operand text (quotes are dropped). */
		if (ch == '"') {
			in_string ^= 1;
			continue;
		}

		if (in_string)
			goto parse_operand;

		if (isspace(ch))
			continue;

		if (is_op_char(ps, ch)) {
			op = infix_get_op(ps, ch);
			if (op == OP_NONE) {
				parse_error(ps, FILT_ERR_INVALID_OP, 0);
				return -EINVAL;
			}

			/* An operator terminates any pending operand. */
			if (strlen(curr_operand(ps))) {
				postfix_append_operand(ps, curr_operand(ps));
				clear_operand_string(ps);
			}

			/* Emit stacked operators of >= precedence first. */
			while (!filter_opstack_empty(ps)) {
				top_op = filter_opstack_top(ps);
				if (!is_precedence_lower(ps, top_op, op)) {
					top_op = filter_opstack_pop(ps);
					postfix_append_op(ps, top_op);
					continue;
				}
				break;
			}

			filter_opstack_push(ps, op);
			continue;
		}

		if (ch == '(') {
			filter_opstack_push(ps, OP_OPEN_PAREN);
			continue;
		}

		if (ch == ')') {
			if (strlen(curr_operand(ps))) {
				postfix_append_operand(ps, curr_operand(ps));
				clear_operand_string(ps);
			}

			/* Pop back to the matching open parenthesis. */
			top_op = filter_opstack_pop(ps);
			while (top_op != OP_NONE) {
				if (top_op == OP_OPEN_PAREN)
					break;
				postfix_append_op(ps, top_op);
				top_op = filter_opstack_pop(ps);
			}
			/* Stack ran dry: ')' without a matching '('. */
			if (top_op == OP_NONE) {
				parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
				return -EINVAL;
			}
			continue;
		}
parse_operand:
		if (append_operand_char(ps, ch)) {
			parse_error(ps, FILT_ERR_OPERAND_TOO_LONG, 0);
			return -EINVAL;
		}
	}

	/* Flush the trailing operand and any remaining operators. */
	if (strlen(curr_operand(ps)))
		postfix_append_operand(ps, curr_operand(ps));

	while (!filter_opstack_empty(ps)) {
		top_op = filter_opstack_pop(ps);
		if (top_op == OP_NONE)
			break;
		/* A leftover '(' means a ')' was missing. */
		if (top_op == OP_OPEN_PAREN) {
			parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
			return -EINVAL;
		}
		postfix_append_op(ps, top_op);
	}

	return 0;
}
1399
1400 static struct filter_pred *create_pred(struct filter_parse_state *ps,
1401 struct trace_event_call *call,
1402 enum filter_op_ids op,
1403 char *operand1, char *operand2)
1404 {
1405 struct ftrace_event_field *field;
1406 static struct filter_pred pred;
1407
1408 memset(&pred, 0, sizeof(pred));
1409 pred.op = op;
1410
1411 if (op == OP_AND || op == OP_OR)
1412 return &pred;
1413
1414 if (!operand1 || !operand2) {
1415 parse_error(ps, FILT_ERR_MISSING_FIELD, 0);
1416 return NULL;
1417 }
1418
1419 field = trace_find_event_field(call, operand1);
1420 if (!field) {
1421 parse_error(ps, FILT_ERR_FIELD_NOT_FOUND, 0);
1422 return NULL;
1423 }
1424
1425 strcpy(pred.regex.pattern, operand2);
1426 pred.regex.len = strlen(pred.regex.pattern);
1427 pred.field = field;
1428 return init_pred(ps, field, &pred) ? NULL : &pred;
1429 }
1430
/*
 * Validate the postfix expression by simulating its evaluation with a
 * value-stack depth counter: operands push one value, binary ops pop
 * two and push one, OP_NOT is unary (depth unchanged). The expression
 * is valid only if it reduces to exactly one value and the logical ops
 * (&&, ||) are outnumbered by the comparison predicates they combine.
 *
 * Returns 0 if well formed, -EINVAL otherwise (error noted in @ps).
 */
static int check_preds(struct filter_parse_state *ps)
{
	int n_normal_preds = 0, n_logical_preds = 0;
	struct postfix_elt *elt;
	int cnt = 0;	/* simulated value-stack depth */

	list_for_each_entry(elt, &ps->postfix, list) {
		if (elt->op == OP_NONE) {
			/* operand: pushes one value */
			cnt++;
			continue;
		}

		if (elt->op == OP_AND || elt->op == OP_OR) {
			/* binary logical op: net depth change of -1 */
			n_logical_preds++;
			cnt--;
			continue;
		}
		/* comparison ops consume a field/value pair; OP_NOT is unary */
		if (elt->op != OP_NOT)
			cnt--;
		n_normal_preds++;
		/* all ops should have operands */
		if (cnt < 0)
			break;
	}

	if (cnt != 1 || !n_normal_preds || n_logical_preds >= n_normal_preds) {
		parse_error(ps, FILT_ERR_INVALID_FILTER, 0);
		return -EINVAL;
	}

	return 0;
}
1463
1464 static int count_preds(struct filter_parse_state *ps)
1465 {
1466 struct postfix_elt *elt;
1467 int n_preds = 0;
1468
1469 list_for_each_entry(elt, &ps->postfix, list) {
1470 if (elt->op == OP_NONE)
1471 continue;
1472 n_preds++;
1473 }
1474
1475 return n_preds;
1476 }
1477
/* Bookkeeping for check_pred_tree_cb(): abort after @max node visits. */
struct check_pred_data {
	int count;	/* nodes visited so far during the walk */
	int max;	/* bound above which the tree is declared broken */
};
1482
1483 static int check_pred_tree_cb(enum move_type move, struct filter_pred *pred,
1484 int *err, void *data)
1485 {
1486 struct check_pred_data *d = data;
1487
1488 if (WARN_ON(d->count++ > d->max)) {
1489 *err = -EINVAL;
1490 return WALK_PRED_ABORT;
1491 }
1492 return WALK_PRED_DEFAULT;
1493 }
1494
1495 /*
1496 * The tree is walked at filtering of an event. If the tree is not correctly
1497 * built, it may cause an infinite loop. Check here that the tree does
1498 * indeed terminate.
1499 */
1500 static int check_pred_tree(struct event_filter *filter,
1501 struct filter_pred *root)
1502 {
1503 struct check_pred_data data = {
1504 /*
1505 * The max that we can hit a node is three times.
1506 * Once going down, once coming up from left, and
1507 * once coming up from right. This is more than enough
1508 * since leafs are only hit a single time.
1509 */
1510 .max = 3 * filter->n_preds,
1511 .count = 0,
1512 };
1513
1514 return walk_pred_tree(filter->preds, root,
1515 check_pred_tree_cb, &data);
1516 }
1517
1518 static int count_leafs_cb(enum move_type move, struct filter_pred *pred,
1519 int *err, void *data)
1520 {
1521 int *count = data;
1522
1523 if ((move == MOVE_DOWN) &&
1524 (pred->left == FILTER_PRED_INVALID))
1525 (*count)++;
1526
1527 return WALK_PRED_DEFAULT;
1528 }
1529
1530 static int count_leafs(struct filter_pred *preds, struct filter_pred *root)
1531 {
1532 int count = 0, ret;
1533
1534 ret = walk_pred_tree(preds, root, count_leafs_cb, &count);
1535 WARN_ON(ret);
1536 return count;
1537 }
1538
/* State threaded through fold_pred_cb() while collecting leaf indices. */
struct fold_pred_data {
	struct filter_pred *root;	/* node whose ops[] array is filled */
	int count;	/* leafs recorded so far */
	int children;	/* capacity of root->ops[] (pre-counted leafs) */
};
1544
/*
 * Walk callback for fold_pred(): record the index of every leaf below
 * the folded root into root->ops[], clearing each leaf's fold flag.
 */
static int fold_pred_cb(enum move_type move, struct filter_pred *pred,
			int *err, void *data)
{
	struct fold_pred_data *d = data;
	struct filter_pred *root = d->root;

	/* only leaf nodes, on their downward visit, are collected */
	if (move != MOVE_DOWN)
		return WALK_PRED_DEFAULT;
	if (pred->left != FILTER_PRED_INVALID)
		return WALK_PRED_DEFAULT;

	/* more leafs than pre-counted would overrun root->ops[] */
	if (WARN_ON(d->count == d->children)) {
		*err = -EINVAL;
		return WALK_PRED_ABORT;
	}

	pred->index &= ~FILTER_PRED_FOLD;
	root->ops[d->count++] = pred->index;
	return WALK_PRED_DEFAULT;
}
1565
/*
 * Fold the subtree at @root: gather the indices of all leaf predicates
 * below it into a freshly allocated root->ops[] array so the match
 * loop can evaluate them as one batch.
 *
 * Returns 0 on success, -ENOMEM if the ops array cannot be allocated,
 * or the error reported by the tree walk.
 */
static int fold_pred(struct filter_pred *preds, struct filter_pred *root)
{
	struct fold_pred_data data = {
		.root = root,
		.count = 0,
	};
	int children;

	/* No need to keep the fold flag */
	root->index &= ~FILTER_PRED_FOLD;

	/* If the root is a leaf then do nothing */
	if (root->left == FILTER_PRED_INVALID)
		return 0;

	/* count the children */
	children = count_leafs(preds, &preds[root->left]);
	children += count_leafs(preds, &preds[root->right]);

	root->ops = kcalloc(children, sizeof(*root->ops), GFP_KERNEL);
	if (!root->ops)
		return -ENOMEM;

	/* for a folded node, val holds the number of entries in ops[] */
	root->val = children;
	data.children = children;
	return walk_pred_tree(preds, root, fold_pred_cb, &data);
}
1593
/*
 * Walk callback for fold_pred_tree(): fold every subtree whose root is
 * marked FILTER_PRED_FOLD, then skip over its (now folded) interior.
 */
static int fold_pred_tree_cb(enum move_type move, struct filter_pred *pred,
			     int *err, void *data)
{
	struct filter_pred *preds = data;

	if (move != MOVE_DOWN)
		return WALK_PRED_DEFAULT;
	if (!(pred->index & FILTER_PRED_FOLD))
		return WALK_PRED_DEFAULT;

	*err = fold_pred(preds, pred);
	if (*err)
		return WALK_PRED_ABORT;

	/* everything below is folded, continue with parent */
	return WALK_PRED_PARENT;
}
1611
1612 /*
1613 * To optimize the processing of the ops, if we have several "ors" or
1614 * "ands" together, we can put them in an array and process them all
1615 * together speeding up the filter logic.
1616 */
1617 static int fold_pred_tree(struct event_filter *filter,
1618 struct filter_pred *root)
1619 {
1620 return walk_pred_tree(filter->preds, root, fold_pred_tree_cb,
1621 filter->preds);
1622 }
1623
1624 static int replace_preds(struct trace_event_call *call,
1625 struct event_filter *filter,
1626 struct filter_parse_state *ps,
1627 bool dry_run)
1628 {
1629 char *operand1 = NULL, *operand2 = NULL;
1630 struct filter_pred *pred;
1631 struct filter_pred *root;
1632 struct postfix_elt *elt;
1633 struct pred_stack stack = { }; /* init to NULL */
1634 int err;
1635 int n_preds = 0;
1636
1637 n_preds = count_preds(ps);
1638 if (n_preds >= MAX_FILTER_PRED) {
1639 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1640 return -ENOSPC;
1641 }
1642
1643 err = check_preds(ps);
1644 if (err)
1645 return err;
1646
1647 if (!dry_run) {
1648 err = __alloc_pred_stack(&stack, n_preds);
1649 if (err)
1650 return err;
1651 err = __alloc_preds(filter, n_preds);
1652 if (err)
1653 goto fail;
1654 }
1655
1656 n_preds = 0;
1657 list_for_each_entry(elt, &ps->postfix, list) {
1658 if (elt->op == OP_NONE) {
1659 if (!operand1)
1660 operand1 = elt->operand;
1661 else if (!operand2)
1662 operand2 = elt->operand;
1663 else {
1664 parse_error(ps, FILT_ERR_TOO_MANY_OPERANDS, 0);
1665 err = -EINVAL;
1666 goto fail;
1667 }
1668 continue;
1669 }
1670
1671 if (elt->op == OP_NOT) {
1672 if (!n_preds || operand1 || operand2) {
1673 parse_error(ps, FILT_ERR_ILLEGAL_NOT_OP, 0);
1674 err = -EINVAL;
1675 goto fail;
1676 }
1677 if (!dry_run)
1678 filter->preds[n_preds - 1].not ^= 1;
1679 continue;
1680 }
1681
1682 if (WARN_ON(n_preds++ == MAX_FILTER_PRED)) {
1683 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1684 err = -ENOSPC;
1685 goto fail;
1686 }
1687
1688 pred = create_pred(ps, call, elt->op, operand1, operand2);
1689 if (!pred) {
1690 err = -EINVAL;
1691 goto fail;
1692 }
1693
1694 if (!dry_run) {
1695 err = filter_add_pred(ps, filter, pred, &stack);
1696 if (err)
1697 goto fail;
1698 }
1699
1700 operand1 = operand2 = NULL;
1701 }
1702
1703 if (!dry_run) {
1704 /* We should have one item left on the stack */
1705 pred = __pop_pred_stack(&stack);
1706 if (!pred)
1707 return -EINVAL;
1708 /* This item is where we start from in matching */
1709 root = pred;
1710 /* Make sure the stack is empty */
1711 pred = __pop_pred_stack(&stack);
1712 if (WARN_ON(pred)) {
1713 err = -EINVAL;
1714 filter->root = NULL;
1715 goto fail;
1716 }
1717 err = check_pred_tree(filter, root);
1718 if (err)
1719 goto fail;
1720
1721 /* Optimize the tree */
1722 err = fold_pred_tree(filter, root);
1723 if (err)
1724 goto fail;
1725
1726 /* We don't set root until we know it works */
1727 barrier();
1728 filter->root = root;
1729 }
1730
1731 err = 0;
1732 fail:
1733 __free_pred_stack(&stack);
1734 return err;
1735 }
1736
1737 static inline void event_set_filtered_flag(struct trace_event_file *file)
1738 {
1739 unsigned long old_flags = file->flags;
1740
1741 file->flags |= EVENT_FILE_FL_FILTERED;
1742
1743 if (old_flags != file->flags)
1744 trace_buffered_event_enable();
1745 }
1746
/* Publish @filter to readers; pairs with RCU dereference at match time. */
static inline void event_set_filter(struct trace_event_file *file,
				    struct event_filter *filter)
{
	rcu_assign_pointer(file->filter, filter);
}
1752
/*
 * Clear the filter pointer. Callers synchronize (e.g. via
 * synchronize_sched()) before freeing the old filter.
 */
static inline void event_clear_filter(struct trace_event_file *file)
{
	RCU_INIT_POINTER(file->filter, NULL);
}
1757
/* Flag @file as unable to take the pending subsystem filter. */
static inline void
event_set_no_set_filter_flag(struct trace_event_file *file)
{
	file->flags |= EVENT_FILE_FL_NO_SET_FILTER;
}
1763
/* Clear the "cannot take this subsystem filter" mark on @file. */
static inline void
event_clear_no_set_filter_flag(struct trace_event_file *file)
{
	file->flags &= ~EVENT_FILE_FL_NO_SET_FILTER;
}
1769
1770 static inline bool
1771 event_no_set_filter_flag(struct trace_event_file *file)
1772 {
1773 if (file->flags & EVENT_FILE_FL_NO_SET_FILTER)
1774 return true;
1775
1776 return false;
1777 }
1778
/* List node used to defer freeing of replaced per-event filters. */
struct filter_list {
	struct list_head list;
	struct event_filter *filter;
};
1783
/*
 * Apply @filter_string to every event in the subsystem @dir.
 *
 * Pass 1 dry-runs the filter against each event and marks those it
 * cannot apply to. Pass 2 builds a fresh filter for each remaining
 * event and swaps it in; the replaced filters are freed only after
 * synchronize_sched(), so no tracer can still be using them.
 *
 * Returns 0 if the filter took hold on at least one event, -EINVAL if
 * it applied to none, -ENOMEM on allocation failure.
 */
static int replace_system_preds(struct trace_subsystem_dir *dir,
				struct trace_array *tr,
				struct filter_parse_state *ps,
				char *filter_string)
{
	struct trace_event_file *file;
	struct filter_list *filter_item;
	struct filter_list *tmp;
	LIST_HEAD(filter_list);
	bool fail = true;
	int err;

	list_for_each_entry(file, &tr->events, list) {
		if (file->system != dir)
			continue;

		/*
		 * Try to see if the filter can be applied
		 * (filter arg is ignored on dry_run)
		 */
		err = replace_preds(file->event_call, NULL, ps, true);
		if (err)
			event_set_no_set_filter_flag(file);
		else
			event_clear_no_set_filter_flag(file);
	}

	list_for_each_entry(file, &tr->events, list) {
		struct event_filter *filter;

		if (file->system != dir)
			continue;

		/* skip events the dry run rejected */
		if (event_no_set_filter_flag(file))
			continue;

		filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
		if (!filter_item)
			goto fail_mem;

		list_add_tail(&filter_item->list, &filter_list);

		filter_item->filter = __alloc_filter();
		if (!filter_item->filter)
			goto fail_mem;
		filter = filter_item->filter;

		/* Can only fail on no memory */
		err = replace_filter_string(filter, filter_string);
		if (err)
			goto fail_mem;

		err = replace_preds(file->event_call, filter, ps, false);
		if (err) {
			filter_disable(file);
			parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
			append_filter_err(ps, filter);
		} else
			event_set_filtered_flag(file);
		/*
		 * Regardless of if this returned an error, we still
		 * replace the filter for the call.
		 */
		filter = event_filter(file);
		event_set_filter(file, filter_item->filter);
		/* stash the old filter on the list for deferred freeing */
		filter_item->filter = filter;

		fail = false;
	}

	if (fail)
		goto fail;

	/*
	 * The calls can still be using the old filters.
	 * Do a synchronize_sched() to ensure all calls are
	 * done with them before we free them.
	 */
	synchronize_sched();
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		__free_filter(filter_item->filter);
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	return 0;
 fail:
	/* No call succeeded */
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
	return -EINVAL;
 fail_mem:
	/* If any call succeeded, we still need to sync */
	if (!fail)
		synchronize_sched();
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		__free_filter(filter_item->filter);
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	return -ENOMEM;
}
1888
/*
 * Allocate an event filter plus parse state, then parse @filter_str
 * into postfix form. If @set_str, the raw string is recorded in the
 * filter so detailed errors can be appended to it.
 *
 * On -ENOMEM nothing is handed back; otherwise *filterp and *psp are
 * set (even when the parse itself failed) and the caller must clean up
 * with create_filter_finish() and free the filter.
 *
 * NOTE(review): all failures before the commit point are reported as
 * -ENOMEM; replace_filter_string() can only fail on allocation, so
 * this is accurate today - revisit if that changes.
 */
static int create_filter_start(char *filter_str, bool set_str,
			       struct filter_parse_state **psp,
			       struct event_filter **filterp)
{
	struct event_filter *filter;
	struct filter_parse_state *ps = NULL;
	int err = 0;

	/* callers must pass in cleared out-params */
	WARN_ON_ONCE(*psp || *filterp);

	/* allocate everything, and if any fails, free all and fail */
	filter = __alloc_filter();
	if (filter && set_str)
		err = replace_filter_string(filter, filter_str);

	ps = kzalloc(sizeof(*ps), GFP_KERNEL);

	if (!filter || !ps || err) {
		kfree(ps);
		__free_filter(filter);
		return -ENOMEM;
	}

	/* we're committed to creating a new filter */
	*filterp = filter;
	*psp = ps;

	parse_init(ps, filter_ops, filter_str);
	err = filter_parse(ps);
	if (err && set_str)
		append_filter_err(ps, filter);
	return err;
}
1922
/* Tear down the parse state allocated by create_filter_start(). */
static void create_filter_finish(struct filter_parse_state *ps)
{
	if (!ps)
		return;

	filter_opstack_clear(ps);
	postfix_clear(ps);
	kfree(ps);
}
1931
1932 /**
1933 * create_filter - create a filter for a trace_event_call
1934 * @call: trace_event_call to create a filter for
1935 * @filter_str: filter string
1936 * @set_str: remember @filter_str and enable detailed error in filter
1937 * @filterp: out param for created filter (always updated on return)
1938 *
1939 * Creates a filter for @call with @filter_str. If @set_str is %true,
1940 * @filter_str is copied and recorded in the new filter.
1941 *
1942 * On success, returns 0 and *@filterp points to the new filter. On
1943 * failure, returns -errno and *@filterp may point to %NULL or to a new
1944 * filter. In the latter case, the returned filter contains error
1945 * information if @set_str is %true and the caller is responsible for
1946 * freeing it.
1947 */
1948 static int create_filter(struct trace_event_call *call,
1949 char *filter_str, bool set_str,
1950 struct event_filter **filterp)
1951 {
1952 struct event_filter *filter = NULL;
1953 struct filter_parse_state *ps = NULL;
1954 int err;
1955
1956 err = create_filter_start(filter_str, set_str, &ps, &filter);
1957 if (!err) {
1958 err = replace_preds(call, filter, ps, false);
1959 if (err && set_str)
1960 append_filter_err(ps, filter);
1961 }
1962 if (err && !set_str) {
1963 free_event_filter(filter);
1964 filter = NULL;
1965 }
1966 create_filter_finish(ps);
1967
1968 *filterp = filter;
1969 return err;
1970 }
1971
/* Exported wrapper around create_filter() for other trace code. */
int create_event_filter(struct trace_event_call *call,
			char *filter_str, bool set_str,
			struct event_filter **filterp)
{
	return create_filter(call, filter_str, set_str, filterp);
}
1978
1979 /**
1980 * create_system_filter - create a filter for an event_subsystem
1981 * @system: event_subsystem to create a filter for
1982 * @filter_str: filter string
1983 * @filterp: out param for created filter (always updated on return)
1984 *
1985 * Identical to create_filter() except that it creates a subsystem filter
1986 * and always remembers @filter_str.
1987 */
1988 static int create_system_filter(struct trace_subsystem_dir *dir,
1989 struct trace_array *tr,
1990 char *filter_str, struct event_filter **filterp)
1991 {
1992 struct event_filter *filter = NULL;
1993 struct filter_parse_state *ps = NULL;
1994 int err;
1995
1996 err = create_filter_start(filter_str, true, &ps, &filter);
1997 if (!err) {
1998 err = replace_system_preds(dir, tr, ps, filter_str);
1999 if (!err) {
2000 /* System filters just show a default message */
2001 kfree(filter->filter_string);
2002 filter->filter_string = NULL;
2003 } else {
2004 append_filter_err(ps, filter);
2005 }
2006 }
2007 create_filter_finish(ps);
2008
2009 *filterp = filter;
2010 return err;
2011 }
2012
/*
 * Set, replace or clear ("0") the filter on one event file.
 * Returns 0 on success or a negative errno.
 *
 * caller must hold event_mutex
 */
int apply_event_filter(struct trace_event_file *file, char *filter_string)
{
	struct trace_event_call *call = file->event_call;
	struct event_filter *filter;
	int err;

	/* writing "0" clears the existing filter */
	if (!strcmp(strstrip(filter_string), "0")) {
		filter_disable(file);
		filter = event_filter(file);

		if (!filter)
			return 0;

		event_clear_filter(file);

		/* Make sure the filter is not being used */
		synchronize_sched();
		__free_filter(filter);

		return 0;
	}

	err = create_filter(call, filter_string, true, &filter);

	/*
	 * Always swap the call filter with the new filter
	 * even if there was an error. If there was an error
	 * in the filter, we disable the filter and show the error
	 * string
	 */
	if (filter) {
		struct event_filter *tmp;

		tmp = event_filter(file);
		if (!err)
			event_set_filtered_flag(file);
		else
			filter_disable(file);

		event_set_filter(file, filter);

		if (tmp) {
			/* Make sure the call is done with the filter */
			synchronize_sched();
			__free_filter(tmp);
		}
	}

	return err;
}
2064
/*
 * Set, replace or clear ("0") the filter for every event in subsystem
 * @dir. Takes event_mutex. Returns 0 on success or a negative errno.
 */
int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
				 char *filter_string)
{
	struct event_subsystem *system = dir->subsystem;
	struct trace_array *tr = dir->tr;
	struct event_filter *filter;
	int err = 0;

	mutex_lock(&event_mutex);

	/* Make sure the system still has events */
	if (!dir->nr_events) {
		err = -ENODEV;
		goto out_unlock;
	}

	if (!strcmp(strstrip(filter_string), "0")) {
		/* "0": drop the system filter and all per-event filters */
		filter_free_subsystem_preds(dir, tr);
		remove_filter_string(system->filter);
		filter = system->filter;
		system->filter = NULL;
		/* Ensure all filters are no longer used */
		synchronize_sched();
		filter_free_subsystem_filters(dir, tr);
		__free_filter(filter);
		goto out_unlock;
	}

	err = create_system_filter(dir, tr, filter_string, &filter);
	if (filter) {
		/*
		 * No event actually uses the system filter
		 * we can free it without synchronize_sched().
		 */
		__free_filter(system->filter);
		system->filter = filter;
	}
out_unlock:
	mutex_unlock(&event_mutex);

	return err;
}
2107
2108 #ifdef CONFIG_PERF_EVENTS
2109
2110 void ftrace_profile_free_filter(struct perf_event *event)
2111 {
2112 struct event_filter *filter = event->filter;
2113
2114 event->filter = NULL;
2115 __free_filter(filter);
2116 }
2117
/* State shared across __ftrace_function_set_filter() calls for one event. */
struct function_filter_data {
	struct ftrace_ops *ops;	/* ops whose filter/notrace lists are set */
	int first_filter;	/* first filter pattern resets the old set */
	int first_notrace;	/* first notrace pattern resets the old set */
};
2123
2124 #ifdef CONFIG_FUNCTION_TRACER
2125 static char **
2126 ftrace_function_filter_re(char *buf, int len, int *count)
2127 {
2128 char *str, **re;
2129
2130 str = kstrndup(buf, len, GFP_KERNEL);
2131 if (!str)
2132 return NULL;
2133
2134 /*
2135 * The argv_split function takes white space
2136 * as a separator, so convert ',' into spaces.
2137 */
2138 strreplace(str, ',', ' ');
2139
2140 re = argv_split(GFP_KERNEL, str, count);
2141 kfree(str);
2142 return re;
2143 }
2144
/* Install @re as either an ftrace filter or a notrace pattern on @ops. */
static int ftrace_function_set_regexp(struct ftrace_ops *ops, int filter,
				      int reset, char *re, int len)
{
	return filter ? ftrace_set_filter(ops, re, len, reset)
		      : ftrace_set_notrace(ops, re, len, reset);
}
2157
2158 static int __ftrace_function_set_filter(int filter, char *buf, int len,
2159 struct function_filter_data *data)
2160 {
2161 int i, re_cnt, ret = -EINVAL;
2162 int *reset;
2163 char **re;
2164
2165 reset = filter ? &data->first_filter : &data->first_notrace;
2166
2167 /*
2168 * The 'ip' field could have multiple filters set, separated
2169 * either by space or comma. We first cut the filter and apply
2170 * all pieces separatelly.
2171 */
2172 re = ftrace_function_filter_re(buf, len, &re_cnt);
2173 if (!re)
2174 return -EINVAL;
2175
2176 for (i = 0; i < re_cnt; i++) {
2177 ret = ftrace_function_set_regexp(data->ops, filter, *reset,
2178 re[i], strlen(re[i]));
2179 if (ret)
2180 break;
2181
2182 if (*reset)
2183 *reset = 0;
2184 }
2185
2186 argv_free(re);
2187 return ret;
2188 }
2189
2190 static int ftrace_function_check_pred(struct filter_pred *pred, int leaf)
2191 {
2192 struct ftrace_event_field *field = pred->field;
2193
2194 if (leaf) {
2195 /*
2196 * Check the leaf predicate for function trace, verify:
2197 * - only '==' and '!=' is used
2198 * - the 'ip' field is used
2199 */
2200 if ((pred->op != OP_EQ) && (pred->op != OP_NE))
2201 return -EINVAL;
2202
2203 if (strcmp(field->name, "ip"))
2204 return -EINVAL;
2205 } else {
2206 /*
2207 * Check the non leaf predicate for function trace, verify:
2208 * - only '||' is used
2209 */
2210 if (pred->op != OP_OR)
2211 return -EINVAL;
2212 }
2213
2214 return 0;
2215 }
2216
/*
 * Walk callback: validate each node for function-trace use and, at
 * each leaf, program the pattern into the ftrace filter/notrace lists.
 */
static int ftrace_function_set_filter_cb(enum move_type move,
					 struct filter_pred *pred,
					 int *err, void *data)
{
	/* Checking the node is valid for function trace. */
	if ((move != MOVE_DOWN) ||
	    (pred->left != FILTER_PRED_INVALID)) {
		/* non-leaf visit: only '||' is permitted */
		*err = ftrace_function_check_pred(pred, 0);
	} else {
		/* leaf, first visit: must be "ip ==" or "ip !=" */
		*err = ftrace_function_check_pred(pred, 1);
		if (*err)
			return WALK_PRED_ABORT;

		/* '==' selects the filter list, '!=' the notrace list */
		*err = __ftrace_function_set_filter(pred->op == OP_EQ,
						    pred->regex.pattern,
						    pred->regex.len,
						    data);
	}

	return (*err) ? WALK_PRED_ABORT : WALK_PRED_DEFAULT;
}
2238
2239 static int ftrace_function_set_filter(struct perf_event *event,
2240 struct event_filter *filter)
2241 {
2242 struct function_filter_data data = {
2243 .first_filter = 1,
2244 .first_notrace = 1,
2245 .ops = &event->ftrace_ops,
2246 };
2247
2248 return walk_pred_tree(filter->preds, filter->root,
2249 ftrace_function_set_filter_cb, &data);
2250 }
2251 #else
/* Function tracer not built in: function filters cannot be applied. */
static int ftrace_function_set_filter(struct perf_event *event,
				      struct event_filter *filter)
{
	return -ENODEV;
}
2257 #endif /* CONFIG_FUNCTION_TRACER */
2258
/*
 * Attach an event filter, parsed from @filter_str, to a perf event.
 * Function events translate the filter into ftrace regexps instead of
 * keeping it. Returns 0 on success or a negative errno.
 */
int ftrace_profile_set_filter(struct perf_event *event, int event_id,
			      char *filter_str)
{
	int err;
	struct event_filter *filter;
	struct trace_event_call *call;

	mutex_lock(&event_mutex);

	call = event->tp_event;

	err = -EINVAL;
	if (!call)
		goto out_unlock;

	/* only one filter per perf event */
	err = -EEXIST;
	if (event->filter)
		goto out_unlock;

	err = create_filter(call, filter_str, false, &filter);
	if (err)
		goto free_filter;

	if (ftrace_event_is_function(call))
		err = ftrace_function_set_filter(event, filter);
	else
		event->filter = filter;

free_filter:
	/*
	 * The filter is not kept on error, nor for function events
	 * (those consumed it above via ftrace_function_set_filter()).
	 */
	if (err || ftrace_event_is_function(call))
		__free_filter(filter);

out_unlock:
	mutex_unlock(&event_mutex);

	return err;
}
2296
2297 #endif /* CONFIG_PERF_EVENTS */
2298
2299 #ifdef CONFIG_FTRACE_STARTUP_TEST
2300
2301 #include <linux/types.h>
2302 #include <linux/tracepoint.h>
2303
2304 #define CREATE_TRACE_POINTS
2305 #include "trace_events_filter_test.h"
2306
/*
 * Self-test table: each DATA_REC entry pairs the current FILTER string
 * with field values a..h for a synthetic record, the expected match
 * result, and the fields whose predicates must NOT be evaluated
 * (verifies folded-tree short-circuit behaviour).
 */
#define DATA_REC(m, va, vb, vc, vd, ve, vf, vg, vh, nvisit) \
{ \
	.filter = FILTER, \
	.rec = { .a = va, .b = vb, .c = vc, .d = vd, \
		.e = ve, .f = vf, .g = vg, .h = vh }, \
	.match = m, \
	.not_visited = nvisit, \
}
#define YES 1
#define NO 0

static struct test_filter_data_t {
	char *filter;
	struct trace_event_raw_ftrace_test_filter rec;
	int match;
	char *not_visited;
} test_filter_data[] = {
#define FILTER "a == 1 && b == 1 && c == 1 && d == 1 && " \
	"e == 1 && f == 1 && g == 1 && h == 1"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, ""),
	DATA_REC(NO, 0, 1, 1, 1, 1, 1, 1, 1, "bcdefgh"),
	DATA_REC(NO, 1, 1, 1, 1, 1, 1, 1, 0, ""),
#undef FILTER
#define FILTER "a == 1 || b == 1 || c == 1 || d == 1 || " \
	"e == 1 || f == 1 || g == 1 || h == 1"
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""),
	DATA_REC(YES, 0, 0, 0, 0, 0, 0, 0, 1, ""),
	DATA_REC(YES, 1, 0, 0, 0, 0, 0, 0, 0, "bcdefgh"),
#undef FILTER
#define FILTER "(a == 1 || b == 1) && (c == 1 || d == 1) && " \
	"(e == 1 || f == 1) && (g == 1 || h == 1)"
	DATA_REC(NO, 0, 0, 1, 1, 1, 1, 1, 1, "dfh"),
	DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(YES, 1, 0, 1, 0, 0, 1, 0, 1, "bd"),
	DATA_REC(NO, 1, 0, 1, 0, 0, 1, 0, 0, "bd"),
#undef FILTER
#define FILTER "(a == 1 && b == 1) || (c == 1 && d == 1) || " \
	"(e == 1 && f == 1) || (g == 1 && h == 1)"
	DATA_REC(YES, 1, 0, 1, 1, 1, 1, 1, 1, "efgh"),
	DATA_REC(YES, 0, 0, 0, 0, 0, 0, 1, 1, ""),
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""),
#undef FILTER
#define FILTER "(a == 1 && b == 1) && (c == 1 && d == 1) && " \
	"(e == 1 && f == 1) || (g == 1 && h == 1)"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 0, "gh"),
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""),
	DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, ""),
#undef FILTER
#define FILTER "((a == 1 || b == 1) || (c == 1 || d == 1) || " \
	"(e == 1 || f == 1)) && (g == 1 || h == 1)"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 1, "bcdef"),
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""),
	DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, "h"),
#undef FILTER
#define FILTER "((((((((a == 1) && (b == 1)) || (c == 1)) && (d == 1)) || " \
	"(e == 1)) && (f == 1)) || (g == 1)) && (h == 1))"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "ceg"),
	DATA_REC(NO, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(NO, 1, 0, 1, 0, 1, 0, 1, 0, ""),
#undef FILTER
#define FILTER "((((((((a == 1) || (b == 1)) && (c == 1)) || (d == 1)) && " \
	"(e == 1)) || (f == 1)) && (g == 1)) || (h == 1))"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "bdfh"),
	DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(YES, 1, 0, 1, 0, 1, 0, 1, 0, "bdfh"),
};

#undef DATA_REC
#undef FILTER
#undef YES
#undef NO

/* number of entries in the test table */
#define DATA_CNT (sizeof(test_filter_data)/sizeof(struct test_filter_data_t))
2380
/* Set by test_pred_visited_fn() when a supposedly skipped pred runs. */
static int test_pred_visited;

/* Replacement pred->fn: record the (unexpected) visit and match. */
static int test_pred_visited_fn(struct filter_pred *pred, void *event)
{
	struct ftrace_event_field *field = pred->field;

	test_pred_visited = 1;
	printk(KERN_INFO "\npred visited %s\n", field->name);
	return 1;
}
2391
/*
 * Walk callback: for every leaf whose field name appears in @data,
 * install test_pred_visited_fn so an unexpected evaluation is caught.
 */
static int test_walk_pred_cb(enum move_type move, struct filter_pred *pred,
			     int *err, void *data)
{
	char *fields = data;

	if ((move == MOVE_DOWN) &&
	    (pred->left == FILTER_PRED_INVALID)) {
		struct ftrace_event_field *field = pred->field;

		if (!field) {
			WARN(1, "all leafs should have field defined");
			return WALK_PRED_DEFAULT;
		}
		/* field names are single letters: match on the first char */
		if (!strchr(fields, *field->name))
			return WALK_PRED_DEFAULT;

		WARN_ON(!pred->fn);
		pred->fn = test_pred_visited_fn;
	}
	return WALK_PRED_DEFAULT;
}
2413
/*
 * Startup self test: run every table entry through the filter engine
 * and verify both the match result and that short-circuited predicates
 * were never evaluated.
 */
static __init int ftrace_test_event_filter(void)
{
	int i;

	printk(KERN_INFO "Testing ftrace filter: ");

	for (i = 0; i < DATA_CNT; i++) {
		struct event_filter *filter = NULL;
		struct test_filter_data_t *d = &test_filter_data[i];
		int err;

		err = create_filter(&event_ftrace_test_filter, d->filter,
				    false, &filter);
		if (err) {
			printk(KERN_INFO
			       "Failed to get filter for '%s', err %d\n",
			       d->filter, err);
			__free_filter(filter);
			break;
		}

		/*
		 * The preemption disabling is not really needed for self
		 * tests, but the rcu dereference will complain without it.
		 */
		preempt_disable();
		/* hook the preds that must not run, then match the record */
		if (*d->not_visited)
			walk_pred_tree(filter->preds, filter->root,
				       test_walk_pred_cb,
				       d->not_visited);

		test_pred_visited = 0;
		err = filter_match_preds(filter, &d->rec);
		preempt_enable();

		__free_filter(filter);

		if (test_pred_visited) {
			printk(KERN_INFO
			       "Failed, unwanted pred visited for filter %s\n",
			       d->filter);
			break;
		}

		if (err != d->match) {
			printk(KERN_INFO
			       "Failed to match filter '%s', expected %d\n",
			       d->filter, d->match);
			break;
		}
	}

	/* only a full pass through the table prints OK */
	if (i == DATA_CNT)
		printk(KERN_CONT "OK\n");

	return 0;
}

late_initcall(ftrace_test_event_filter);
2473
2474 #endif /* CONFIG_FTRACE_STARTUP_TEST */