drivers: power: report battery voltage in AOSP compatible format
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / kernel / trace / trace_events_filter.c
1 /*
2 * trace_events_filter - generic event filtering
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
19 */
20
21 #include <linux/module.h>
22 #include <linux/ctype.h>
23 #include <linux/mutex.h>
24 #include <linux/perf_event.h>
25 #include <linux/slab.h>
26
27 #include "trace.h"
28 #include "trace_output.h"
29
/*
 * Help text shown when reading a subsystem filter file that has no
 * filter set; also doubles as the place parse errors are reported.
 */
#define DEFAULT_SYS_FILTER_MESSAGE					\
	"### global filter ###\n"					\
	"# Use this to set filters for multiple events.\n"		\
	"# Only events with the given fields will be affected.\n"	\
	"# If no events are modified, an error message will be displayed here"
/*
 * Operator identifiers understood by the filter parser.
 * Each id indexes a matching entry in filter_ops[] below, so the two
 * must be kept in the same order.
 */
enum filter_op_ids
{
	OP_OR,		/* logical || */
	OP_AND,		/* logical && */
	OP_GLOB,	/* '~' glob match (strings only) */
	OP_NE,
	OP_EQ,
	OP_LT,
	OP_LE,
	OP_GT,
	OP_GE,
	OP_NONE,	/* sentinel: "not an operator" */
	OP_OPEN_PAREN,	/* '(' parked on the operator stack */
};

/* One parser operator: its id, textual form, and binding strength. */
struct filter_op {
	int id;
	char *string;
	int precedence;		/* higher binds tighter */
};

/* Operator table; terminated (for scanning purposes) by the OP_NONE entry. */
static struct filter_op filter_ops[] = {
	{ OP_OR,	"||",		1 },
	{ OP_AND,	"&&",		2 },
	{ OP_GLOB,	"~",		4 },
	{ OP_NE,	"!=",		4 },
	{ OP_EQ,	"==",		4 },
	{ OP_LT,	"<",		5 },
	{ OP_LE,	"<=",		5 },
	{ OP_GT,	">",		5 },
	{ OP_GE,	">=",		5 },
	{ OP_NONE,	"OP_NONE",	0 },
	{ OP_OPEN_PAREN, "(",		0 },
};
70
/*
 * Parse error codes.  Each value is an index into err_text[] below;
 * the two arrays must stay in sync.
 */
enum {
	FILT_ERR_NONE,
	FILT_ERR_INVALID_OP,
	FILT_ERR_UNBALANCED_PAREN,
	FILT_ERR_TOO_MANY_OPERANDS,
	FILT_ERR_OPERAND_TOO_LONG,
	FILT_ERR_FIELD_NOT_FOUND,
	FILT_ERR_ILLEGAL_FIELD_OP,
	FILT_ERR_ILLEGAL_INTVAL,
	FILT_ERR_BAD_SUBSYS_FILTER,
	FILT_ERR_TOO_MANY_PREDS,
	FILT_ERR_MISSING_FIELD,
	FILT_ERR_INVALID_FILTER,
	FILT_ERR_IP_FIELD_ONLY,
};

/* Human-readable messages, indexed by the FILT_ERR_* codes above. */
static char *err_text[] = {
	"No error",
	"Invalid operator",
	"Unbalanced parens",
	"Too many operands",
	"Operand too long",
	"Field not found",
	"Illegal operation for field type",
	"Illegal integer value",
	"Couldn't find or set field in one of a subsystem's events",
	"Too many terms in predicate expression",
	"Missing field name and/or value",
	"Meaningless filter expression",
	"Only 'ip' field is supported for function trace",
};
102
/* Node on the parser's operator stack (shunting-yard algorithm). */
struct opstack_op {
	int op;				/* one of enum filter_op_ids */
	struct list_head list;
};

/*
 * Element of the postfix (RPN) output list.  Either an operator
 * (op != OP_NONE, operand == NULL) or an operand string
 * (op == OP_NONE, operand is a kstrdup'd copy).
 */
struct postfix_elt {
	int op;
	char *operand;
	struct list_head list;
};

/* All transient state for one infix -> postfix parse. */
struct filter_parse_state {
	struct filter_op *ops;		/* operator table (filter_ops) */
	struct list_head opstack;	/* pending operators */
	struct list_head postfix;	/* RPN output */
	int lasterr;			/* last FILT_ERR_* code */
	int lasterr_pos;		/* position of the error in the input */

	/* input expression and read cursor */
	struct {
		char *string;
		unsigned int cnt;	/* characters left to consume */
		unsigned int tail;	/* next read position */
	} infix;

	/* operand currently being accumulated character by character */
	struct {
		char string[MAX_FILTER_STR_VAL];
		int pos;
		unsigned int tail;	/* next write position */
	} operand;
};

/*
 * Fixed-capacity stack of predicate pointers used while turning the
 * postfix list into a binary predicate tree.  Grows downward: index
 * is the next free slot counting from the top.
 */
struct pred_stack {
	struct filter_pred	**preds;
	int			index;
};
138
/*
 * Generate a typed ordering predicate (filter_pred_<type>) that reads
 * the field at pred->offset from the raw event record and compares it
 * against pred->val with the relational operator stored in pred->op.
 * Equality ops are handled separately by DEFINE_EQUALITY_PRED.
 */
#define DEFINE_COMPARISON_PRED(type)					\
static int filter_pred_##type(struct filter_pred *pred, void *event)	\
{									\
	type *addr = (type *)(event + pred->offset);			\
	type val = (type)pred->val;					\
	int match = 0;							\
									\
	switch (pred->op) {						\
	case OP_LT:							\
		match = (*addr < val);					\
		break;							\
	case OP_LE:							\
		match = (*addr <= val);					\
		break;							\
	case OP_GT:							\
		match = (*addr > val);					\
		break;							\
	case OP_GE:							\
		match = (*addr >= val);					\
		break;							\
	default:							\
		break;							\
	}								\
									\
	return match;							\
}
165
/*
 * Generate a size-specific equality predicate (filter_pred_<size>).
 * pred->not flips the result, so the same helper serves == and !=.
 */
#define DEFINE_EQUALITY_PRED(size)					\
static int filter_pred_##size(struct filter_pred *pred, void *event)	\
{									\
	u##size *addr = (u##size *)(event + pred->offset);		\
	u##size val = (u##size)pred->val;				\
	int match;							\
									\
	match = (val == *addr) ^ pred->not;				\
									\
	return match;							\
}
177
/* Instantiate ordering predicates for every signed/unsigned width. */
DEFINE_COMPARISON_PRED(s64);
DEFINE_COMPARISON_PRED(u64);
DEFINE_COMPARISON_PRED(s32);
DEFINE_COMPARISON_PRED(u32);
DEFINE_COMPARISON_PRED(s16);
DEFINE_COMPARISON_PRED(u16);
DEFINE_COMPARISON_PRED(s8);
DEFINE_COMPARISON_PRED(u8);

/* Instantiate equality predicates; signedness is irrelevant for ==/!=. */
DEFINE_EQUALITY_PRED(64);
DEFINE_EQUALITY_PRED(32);
DEFINE_EQUALITY_PRED(16);
DEFINE_EQUALITY_PRED(8);
191
192 /* Filter predicate for fixed sized arrays of characters */
193 static int filter_pred_string(struct filter_pred *pred, void *event)
194 {
195 char *addr = (char *)(event + pred->offset);
196 int cmp, match;
197
198 cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len);
199
200 match = cmp ^ pred->not;
201
202 return match;
203 }
204
205 /* Filter predicate for char * pointers */
206 static int filter_pred_pchar(struct filter_pred *pred, void *event)
207 {
208 char **addr = (char **)(event + pred->offset);
209 int cmp, match;
210 int len = strlen(*addr) + 1; /* including tailing '\0' */
211
212 cmp = pred->regex.match(*addr, &pred->regex, len);
213
214 match = cmp ^ pred->not;
215
216 return match;
217 }
218
219 /*
220 * Filter predicate for dynamic sized arrays of characters.
221 * These are implemented through a list of strings at the end
222 * of the entry.
223 * Also each of these strings have a field in the entry which
224 * contains its offset from the beginning of the entry.
225 * We have then first to get this field, dereference it
226 * and add it to the address of the entry, and at last we have
227 * the address of the string.
228 */
229 static int filter_pred_strloc(struct filter_pred *pred, void *event)
230 {
231 u32 str_item = *(u32 *)(event + pred->offset);
232 int str_loc = str_item & 0xffff;
233 int str_len = str_item >> 16;
234 char *addr = (char *)(event + str_loc);
235 int cmp, match;
236
237 cmp = pred->regex.match(addr, &pred->regex, str_len);
238
239 match = cmp ^ pred->not;
240
241 return match;
242 }
243
/* Placeholder predicate used for unset slots: never matches. */
static int filter_pred_none(struct filter_pred *pred, void *event)
{
	return 0;
}
248
249 /*
250 * regex_match_foo - Basic regex callbacks
251 *
252 * @str: the string to be searched
253 * @r: the regex structure containing the pattern string
254 * @len: the length of the string to be searched (including '\0')
255 *
256 * Note:
257 * - @str might not be NULL-terminated if it's of type DYN_STRING
258 * or STATIC_STRING
259 */
260
261 static int regex_match_full(char *str, struct regex *r, int len)
262 {
263 if (strncmp(str, r->pattern, len) == 0)
264 return 1;
265 return 0;
266 }
267
268 static int regex_match_front(char *str, struct regex *r, int len)
269 {
270 if (strncmp(str, r->pattern, r->len) == 0)
271 return 1;
272 return 0;
273 }
274
/* MATCH_MIDDLE_ONLY: the pattern may appear anywhere in the region. */
static int regex_match_middle(char *str, struct regex *r, int len)
{
	/* strnstr bounds the search by len; str need not be NUL-terminated */
	if (strnstr(str, r->pattern, len))
		return 1;
	return 0;
}
281
282 static int regex_match_end(char *str, struct regex *r, int len)
283 {
284 int strlen = len - 1;
285
286 if (strlen >= r->len &&
287 memcmp(str + strlen - r->len, r->pattern, r->len) == 0)
288 return 1;
289 return 0;
290 }
291
292 /**
293 * filter_parse_regex - parse a basic regex
294 * @buff: the raw regex
295 * @len: length of the regex
296 * @search: will point to the beginning of the string to compare
297 * @not: tell whether the match will have to be inverted
298 *
299 * This passes in a buffer containing a regex and this function will
300 * set search to point to the search part of the buffer and
301 * return the type of search it is (see enum above).
302 * This does modify buff.
303 *
304 * Returns enum type.
305 * search returns the pointer to use for comparison.
306 * not returns 1 if buff started with a '!'
307 * 0 otherwise.
308 */
enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
{
	int type = MATCH_FULL;
	int i;

	/* a leading '!' inverts the match and is not part of the pattern */
	if (buff[0] == '!') {
		*not = 1;
		buff++;
		len--;
	} else
		*not = 0;

	*search = buff;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				/* leading '*': match against the end only */
				*search = buff + 1;
				type = MATCH_END_ONLY;
			} else {
				/*
				 * '*' after the first char: a trailing '*'
				 * upgrades END to MIDDLE, otherwise it is a
				 * front match.  Terminate the pattern here;
				 * anything after the '*' is ignored.
				 */
				if (type == MATCH_END_ONLY)
					type = MATCH_MIDDLE_ONLY;
				else
					type = MATCH_FRONT_ONLY;
				buff[i] = 0;
				break;
			}
		}
	}

	return type;
}
341
/*
 * Resolve the predicate's pattern into a concrete match callback.
 * Only OP_GLOB patterns are parsed for '*' wildcards; ==/!= on strings
 * keep MATCH_FULL semantics against the literal pattern.
 */
static void filter_build_regex(struct filter_pred *pred)
{
	struct regex *r = &pred->regex;
	char *search;
	enum regex_type type = MATCH_FULL;
	int not = 0;

	if (pred->op == OP_GLOB) {
		type = filter_parse_regex(r->pattern, r->len, &search, &not);
		/* drop any leading '!' / '*' the parser consumed */
		r->len = strlen(search);
		memmove(r->pattern, search, r->len+1);
	}

	switch (type) {
	case MATCH_FULL:
		r->match = regex_match_full;
		break;
	case MATCH_FRONT_ONLY:
		r->match = regex_match_front;
		break;
	case MATCH_MIDDLE_ONLY:
		r->match = regex_match_middle;
		break;
	case MATCH_END_ONLY:
		r->match = regex_match_end;
		break;
	}

	/* a '!pattern' glob combines with an already-negated predicate */
	pred->not ^= not;
}
372
/* Direction of the current step while walking the predicate tree. */
enum move_type {
	MOVE_DOWN,		/* descending into a child */
	MOVE_UP_FROM_LEFT,	/* returning from a left child */
	MOVE_UP_FROM_RIGHT	/* returning from a right child */
};
378
/*
 * Step from @pred up to its parent in the flat preds[] array and report
 * which side we came from via *move.  The side is encoded in the
 * FILTER_PRED_IS_RIGHT bit of pred->parent.
 * NOTE(review): the @index parameter is unused here -- callers pass
 * pred->parent; presumably kept for the walk API shape.
 */
static struct filter_pred *
get_pred_parent(struct filter_pred *pred, struct filter_pred *preds,
		int index, enum move_type *move)
{
	if (pred->parent & FILTER_PRED_IS_RIGHT)
		*move = MOVE_UP_FROM_RIGHT;
	else
		*move = MOVE_UP_FROM_LEFT;
	pred = &preds[pred->parent & ~FILTER_PRED_IS_RIGHT];

	return pred;
}
391
/* Values a walk callback may return to steer walk_pred_tree(). */
enum walk_return {
	WALK_PRED_ABORT,	/* stop the walk, return callback's *err */
	WALK_PRED_PARENT,	/* skip children, go straight to the parent */
	WALK_PRED_DEFAULT,	/* continue the normal traversal */
};

/* Callback invoked once per traversal step (see enum move_type). */
typedef int (*filter_pred_walkcb_t) (enum move_type move,
				     struct filter_pred *pred,
				     int *err, void *data);
401
/*
 * Iteratively traverse the predicate tree rooted at @root, invoking
 * @cb at every move (down, up-from-left, up-from-right).  Leaves are
 * recognized by pred->left == FILTER_PRED_INVALID.  The walk is
 * iterative (no recursion) so stack depth is bounded regardless of
 * filter complexity.  Returns 0, or the callback's *err on abort.
 */
static int walk_pred_tree(struct filter_pred *preds,
			  struct filter_pred *root,
			  filter_pred_walkcb_t cb, void *data)
{
	struct filter_pred *pred = root;
	enum move_type move = MOVE_DOWN;
	int done = 0;

	if  (!preds)
		return -EINVAL;

	do {
		int err = 0, ret;

		ret = cb(move, pred, &err, data);
		if (ret == WALK_PRED_ABORT)
			return err;
		if (ret == WALK_PRED_PARENT)
			goto get_parent;

		switch (move) {
		case MOVE_DOWN:
			/* descend left first; leaves fall through to parent */
			if (pred->left != FILTER_PRED_INVALID) {
				pred = &preds[pred->left];
				continue;
			}
			goto get_parent;
		case MOVE_UP_FROM_LEFT:
			/* left subtree done: now walk the right subtree */
			pred = &preds[pred->right];
			move = MOVE_DOWN;
			continue;
		case MOVE_UP_FROM_RIGHT:
 get_parent:
			if (pred == root)
				break;
			pred = get_pred_parent(pred, preds,
					       pred->parent,
					       &move);
			continue;
		}
		done = 1;
	} while (!done);

	/* We are fine. */
	return 0;
}
448
/*
 * A series of ANDs or ORs were found together.  Instead of
 * climbing up and down the tree branches, an array of the
 * ops was made in order of checks.  We can just move across
 * the array and short circuit if needed.
 */
static int process_ops(struct filter_pred *preds,
		       struct filter_pred *op, void *rec)
{
	struct filter_pred *pred;
	int match = 0;
	int type;
	int i;

	/*
	 * Micro-optimization: We set type to true if op
	 * is an OR and false otherwise (AND). Then we
	 * just need to test if the match is equal to
	 * the type, and if it is, we can short circuit the
	 * rest of the checks:
	 *
	 * if ((match && op->op == OP_OR) ||
	 *     (!match && op->op == OP_AND))
	 *	  return match;
	 */
	type = op->op == OP_OR;

	/* op->val holds the number of folded children in op->ops[] */
	for (i = 0; i < op->val; i++) {
		pred = &preds[op->ops[i]];
		if (!WARN_ON_ONCE(!pred->fn))
			match = pred->fn(pred, rec);
		if (!!match == type)
			return match;
	}
	return match;
}
485
/* Walk state for filter_match_preds(): running result plus the record. */
struct filter_match_preds_data {
	struct filter_pred *preds;
	int match;		/* most recent subtree result */
	void *rec;		/* raw event record being filtered */
};

/*
 * walk_pred_tree() callback evaluating the filter against d->rec.
 * Folded op groups and leaves are evaluated directly; AND/OR nodes
 * short-circuit after their left child when the result is decided.
 */
static int filter_match_preds_cb(enum move_type move, struct filter_pred *pred,
				 int *err, void *data)
{
	struct filter_match_preds_data *d = data;

	*err = 0;
	switch (move) {
	case MOVE_DOWN:
		/* only AND and OR have children */
		if (pred->left != FILTER_PRED_INVALID) {
			/* If ops is set, then it was folded. */
			if (!pred->ops)
				return WALK_PRED_DEFAULT;
			/* We can treat folded ops as a leaf node */
			d->match = process_ops(d->preds, pred, d->rec);
		} else {
			if (!WARN_ON_ONCE(!pred->fn))
				d->match = pred->fn(pred, d->rec);
		}

		return WALK_PRED_PARENT;
	case MOVE_UP_FROM_LEFT:
		/*
		 * Check for short circuits.
		 *
		 * Optimization: !!match == (pred->op == OP_OR)
		 *   is the same as:
		 * if ((match && pred->op == OP_OR) ||
		 *     (!match && pred->op == OP_AND))
		 */
		if (!!d->match == (pred->op == OP_OR))
			return WALK_PRED_PARENT;
		break;
	case MOVE_UP_FROM_RIGHT:
		break;
	}

	return WALK_PRED_DEFAULT;
}
531
/* return 1 if event matches, 0 otherwise (discard) */
int filter_match_preds(struct event_filter *filter, void *rec)
{
	struct filter_pred *preds;
	struct filter_pred *root;
	struct filter_match_preds_data data = {
		/* match is currently meaningless */
		.match = -1,
		.rec   = rec,
	};
	int n_preds, ret;

	/* no filter is considered a match */
	if (!filter)
		return 1;

	n_preds = filter->n_preds;
	if (!n_preds)
		return 1;

	/*
	 * n_preds, root and filter->preds are protect with preemption disabled.
	 */
	root = rcu_dereference_sched(filter->root);
	if (!root)
		return 1;

	data.preds = preds = rcu_dereference_sched(filter->preds);
	ret = walk_pred_tree(preds, root, filter_match_preds_cb, &data);
	/* a non-zero ret means the tree was malformed, not a filter miss */
	WARN_ON(ret);
	return data.match;
}
EXPORT_SYMBOL_GPL(filter_match_preds);
565
/* Record the last parse error code and its input position for reporting. */
static void parse_error(struct filter_parse_state *ps, int err, int pos)
{
	ps->lasterr = err;
	ps->lasterr_pos = pos;
}
571
572 static void remove_filter_string(struct event_filter *filter)
573 {
574 if (!filter)
575 return;
576
577 kfree(filter->filter_string);
578 filter->filter_string = NULL;
579 }
580
581 static int replace_filter_string(struct event_filter *filter,
582 char *filter_string)
583 {
584 kfree(filter->filter_string);
585 filter->filter_string = kstrdup(filter_string, GFP_KERNEL);
586 if (!filter->filter_string)
587 return -ENOMEM;
588
589 return 0;
590 }
591
592 static int append_filter_string(struct event_filter *filter,
593 char *string)
594 {
595 int newlen;
596 char *new_filter_string;
597
598 BUG_ON(!filter->filter_string);
599 newlen = strlen(filter->filter_string) + strlen(string) + 1;
600 new_filter_string = kmalloc(newlen, GFP_KERNEL);
601 if (!new_filter_string)
602 return -ENOMEM;
603
604 strcpy(new_filter_string, filter->filter_string);
605 strcat(new_filter_string, string);
606 kfree(filter->filter_string);
607 filter->filter_string = new_filter_string;
608
609 return 0;
610 }
611
/*
 * Append a human-readable error report to the filter string: a line of
 * spaces with a '^' under the failing position, then the error text.
 * Best effort -- silently does nothing if a page can't be allocated.
 */
static void append_filter_err(struct filter_parse_state *ps,
			      struct event_filter *filter)
{
	int pos = ps->lasterr_pos;
	char *buf, *pbuf;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return;

	append_filter_string(filter, "\n");
	memset(buf, ' ', PAGE_SIZE);
	/* keep the marker and the message within the one page */
	if (pos > PAGE_SIZE - 128)
		pos = 0;
	buf[pos] = '^';
	pbuf = &buf[pos] + 1;

	sprintf(pbuf, "\nparse_error: %s\n", err_text[ps->lasterr]);
	append_filter_string(filter, buf);
	free_page((unsigned long) buf);
}
633
634 /* caller must hold event_mutex */
635 void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
636 {
637 struct event_filter *filter = call->filter;
638
639 if (filter && filter->filter_string)
640 trace_seq_printf(s, "%s\n", filter->filter_string);
641 else
642 trace_seq_printf(s, "none\n");
643 }
644
/*
 * Print a subsystem's filter string, or the default help text when no
 * filter is set.  Takes event_mutex itself (unlike print_event_filter).
 */
void print_subsystem_event_filter(struct event_subsystem *system,
				  struct trace_seq *s)
{
	struct event_filter *filter;

	mutex_lock(&event_mutex);
	filter = system->filter;
	if (filter && filter->filter_string)
		trace_seq_printf(s, "%s\n", filter->filter_string);
	else
		trace_seq_printf(s, DEFAULT_SYS_FILTER_MESSAGE "\n");
	mutex_unlock(&event_mutex);
}
658
/*
 * Allocate a stack able to hold @n_preds predicate pointers.  kcalloc
 * zeroes one extra slot which acts as the NULL "empty" sentinel read
 * by __pop_pred_stack().  The stack grows downward from index n_preds.
 */
static int __alloc_pred_stack(struct pred_stack *stack, int n_preds)
{
	stack->preds = kcalloc(n_preds + 1, sizeof(*stack->preds), GFP_KERNEL);
	if (!stack->preds)
		return -ENOMEM;
	stack->index = n_preds;
	return 0;
}
667
/* Release the stack storage; the pointed-to preds are owned elsewhere. */
static void __free_pred_stack(struct pred_stack *stack)
{
	kfree(stack->preds);
	stack->index = 0;
}
673
674 static int __push_pred_stack(struct pred_stack *stack,
675 struct filter_pred *pred)
676 {
677 int index = stack->index;
678
679 if (WARN_ON(index == 0))
680 return -ENOSPC;
681
682 stack->preds[--index] = pred;
683 stack->index = index;
684 return 0;
685 }
686
687 static struct filter_pred *
688 __pop_pred_stack(struct pred_stack *stack)
689 {
690 struct filter_pred *pred;
691 int index = stack->index;
692
693 pred = stack->preds[index++];
694 if (!pred)
695 return NULL;
696
697 stack->index = index;
698 return pred;
699 }
700
/*
 * Copy @src into filter->preds[idx] and wire it into the tree being
 * built from the postfix stream: logical ops pop their two children
 * off @stack and link them; leaves are marked terminal.  The new node
 * is then pushed back so it can become a child of a later op.
 * FILTER_PRED_FOLD in ->index tracks whether a node may be flattened
 * into its parent's ops[] array later.
 */
static int filter_set_pred(struct event_filter *filter,
			   int idx,
			   struct pred_stack *stack,
			   struct filter_pred *src)
{
	struct filter_pred *dest = &filter->preds[idx];
	struct filter_pred *left;
	struct filter_pred *right;

	*dest = *src;
	dest->index = idx;

	if (dest->op == OP_OR || dest->op == OP_AND) {
		right = __pop_pred_stack(stack);
		left = __pop_pred_stack(stack);
		if (!left || !right)
			return -EINVAL;
		/*
		 * If both children can be folded
		 * and they are the same op as this op or a leaf,
		 * then this op can be folded.
		 */
		if (left->index & FILTER_PRED_FOLD &&
		    (left->op == dest->op ||
		     left->left == FILTER_PRED_INVALID) &&
		    right->index & FILTER_PRED_FOLD &&
		    (right->op == dest->op ||
		     right->left == FILTER_PRED_INVALID))
			dest->index |= FILTER_PRED_FOLD;

		/* strip the FOLD flag from stored indexes; keep side bit */
		dest->left = left->index & ~FILTER_PRED_FOLD;
		dest->right = right->index & ~FILTER_PRED_FOLD;
		left->parent = dest->index & ~FILTER_PRED_FOLD;
		right->parent = dest->index | FILTER_PRED_IS_RIGHT;
	} else {
		/*
		 * Make dest->left invalid to be used as a quick
		 * way to know this is a leaf node.
		 */
		dest->left = FILTER_PRED_INVALID;

		/* All leafs allow folding the parent ops. */
		dest->index |= FILTER_PRED_FOLD;
	}

	return __push_pred_stack(stack, dest);
}
748
749 static void __free_preds(struct event_filter *filter)
750 {
751 int i;
752
753 if (filter->preds) {
754 for (i = 0; i < filter->n_preds; i++)
755 kfree(filter->preds[i].ops);
756 kfree(filter->preds);
757 filter->preds = NULL;
758 }
759 filter->a_preds = 0;
760 filter->n_preds = 0;
761 }
762
/* Stop applying a filter to @call by clearing its FILTERED flag. */
static void filter_disable(struct ftrace_event_call *call)
{
	call->flags &= ~TRACE_EVENT_FL_FILTERED;
}
767
768 static void __free_filter(struct event_filter *filter)
769 {
770 if (!filter)
771 return;
772
773 __free_preds(filter);
774 kfree(filter->filter_string);
775 kfree(filter);
776 }
777
/*
 * Called when destroying the ftrace_event_call.
 * The call is being freed, so we do not need to worry about
 * the call being currently used. This is for module code removing
 * the tracepoints from within it.
 */
void destroy_preds(struct ftrace_event_call *call)
{
	__free_filter(call->filter);
	call->filter = NULL;
}
789
790 static struct event_filter *__alloc_filter(void)
791 {
792 struct event_filter *filter;
793
794 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
795 return filter;
796 }
797
798 static int __alloc_preds(struct event_filter *filter, int n_preds)
799 {
800 struct filter_pred *pred;
801 int i;
802
803 if (filter->preds)
804 __free_preds(filter);
805
806 filter->preds = kcalloc(n_preds, sizeof(*filter->preds), GFP_KERNEL);
807
808 if (!filter->preds)
809 return -ENOMEM;
810
811 filter->a_preds = n_preds;
812 filter->n_preds = 0;
813
814 for (i = 0; i < n_preds; i++) {
815 pred = &filter->preds[i];
816 pred->fn = filter_pred_none;
817 }
818
819 return 0;
820 }
821
/*
 * Disable filtering and drop the filter string for every event in
 * @system; the event_filter objects themselves are kept (see
 * filter_free_subsystem_filters for full teardown).
 */
static void filter_free_subsystem_preds(struct event_subsystem *system)
{
	struct ftrace_event_call *call;

	list_for_each_entry(call, &ftrace_events, list) {
		if (strcmp(call->class->system, system->name) != 0)
			continue;

		filter_disable(call);
		remove_filter_string(call->filter);
	}
}
834
/* Fully free the event_filter of every event belonging to @system. */
static void filter_free_subsystem_filters(struct event_subsystem *system)
{
	struct ftrace_event_call *call;

	list_for_each_entry(call, &ftrace_events, list) {
		if (strcmp(call->class->system, system->name) != 0)
			continue;
		__free_filter(call->filter);
		call->filter = NULL;
	}
}
846
847 static int filter_add_pred(struct filter_parse_state *ps,
848 struct event_filter *filter,
849 struct filter_pred *pred,
850 struct pred_stack *stack)
851 {
852 int err;
853
854 if (WARN_ON(filter->n_preds == filter->a_preds)) {
855 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
856 return -ENOSPC;
857 }
858
859 err = filter_set_pred(filter, filter->n_preds, stack, pred);
860 if (err)
861 return err;
862
863 filter->n_preds++;
864
865 return 0;
866 }
867
868 int filter_assign_type(const char *type)
869 {
870 if (strstr(type, "__data_loc") && strstr(type, "char"))
871 return FILTER_DYN_STRING;
872
873 if (strchr(type, '[') && strstr(type, "char"))
874 return FILTER_STATIC_STRING;
875
876 return FILTER_OTHER;
877 }
878
/* True for the special function-trace 'ip' style field. */
static bool is_function_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_TRACE_FN;
}

/* True for any of the three string field flavors. */
static bool is_string_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_DYN_STRING ||
	       field->filter_type == FILTER_STATIC_STRING ||
	       field->filter_type == FILTER_PTR_STRING;
}
890
891 static int is_legal_op(struct ftrace_event_field *field, int op)
892 {
893 if (is_string_field(field) &&
894 (op != OP_EQ && op != OP_NE && op != OP_GLOB))
895 return 0;
896 if (!is_string_field(field) && op == OP_GLOB)
897 return 0;
898
899 return 1;
900 }
901
/*
 * Pick the predicate callback for a numeric field: the size-specific
 * equality helper for ==/!=, otherwise the signed- or unsigned-typed
 * ordering helper.  Returns NULL for an unsupported field size.
 */
static filter_pred_fn_t select_comparison_fn(int op, int field_size,
					     int field_is_signed)
{
	filter_pred_fn_t fn = NULL;

	switch (field_size) {
	case 8:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_64;
		else if (field_is_signed)
			fn = filter_pred_s64;
		else
			fn = filter_pred_u64;
		break;
	case 4:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_32;
		else if (field_is_signed)
			fn = filter_pred_s32;
		else
			fn = filter_pred_u32;
		break;
	case 2:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_16;
		else if (field_is_signed)
			fn = filter_pred_s16;
		else
			fn = filter_pred_u16;
		break;
	case 1:
		if (op == OP_EQ || op == OP_NE)
			fn = filter_pred_8;
		else if (field_is_signed)
			fn = filter_pred_s8;
		else
			fn = filter_pred_u8;
		break;
	}

	return fn;
}
944
/*
 * Finish setting up a leaf predicate: validate the op for the field's
 * type and resolve pred->fn.  String fields get a regex callback,
 * function-trace fields only accept the 'ip' field, and numeric fields
 * get their value parsed and a typed comparison helper.
 * Returns 0 or -EINVAL (with the parse error recorded in @ps).
 */
static int init_pred(struct filter_parse_state *ps,
		     struct ftrace_event_field *field,
		     struct filter_pred *pred)

{
	filter_pred_fn_t fn = filter_pred_none;
	unsigned long long val;
	int ret;

	pred->offset = field->offset;

	if (!is_legal_op(field, pred->op)) {
		parse_error(ps, FILT_ERR_ILLEGAL_FIELD_OP, 0);
		return -EINVAL;
	}

	if (is_string_field(field)) {
		filter_build_regex(pred);

		if (field->filter_type == FILTER_STATIC_STRING) {
			fn = filter_pred_string;
			pred->regex.field_len = field->size;
		} else if (field->filter_type == FILTER_DYN_STRING)
			fn = filter_pred_strloc;
		else
			fn = filter_pred_pchar;
	} else if (is_function_field(field)) {
		if (strcmp(field->name, "ip")) {
			parse_error(ps, FILT_ERR_IP_FIELD_ONLY, 0);
			return -EINVAL;
		}
	} else {
		/* parse the numeric operand with the field's signedness */
		if (field->is_signed)
			ret = kstrtoll(pred->regex.pattern, 0, &val);
		else
			ret = kstrtoull(pred->regex.pattern, 0, &val);
		if (ret) {
			parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
			return -EINVAL;
		}
		pred->val = val;

		fn = select_comparison_fn(pred->op, field->size,
					  field->is_signed);
		if (!fn) {
			parse_error(ps, FILT_ERR_INVALID_OP, 0);
			return -EINVAL;
		}
	}

	/* '!=' is implemented as an inverted '==' */
	if (pred->op == OP_NE)
		pred->not = 1;

	pred->fn = fn;
	return 0;
}
1001
1002 static void parse_init(struct filter_parse_state *ps,
1003 struct filter_op *ops,
1004 char *infix_string)
1005 {
1006 memset(ps, '\0', sizeof(*ps));
1007
1008 ps->infix.string = infix_string;
1009 ps->infix.cnt = strlen(infix_string);
1010 ps->ops = ops;
1011
1012 INIT_LIST_HEAD(&ps->opstack);
1013 INIT_LIST_HEAD(&ps->postfix);
1014 }
1015
1016 static char infix_next(struct filter_parse_state *ps)
1017 {
1018 if (!ps->infix.cnt)
1019 return 0;
1020
1021 ps->infix.cnt--;
1022
1023 return ps->infix.string[ps->infix.tail++];
1024 }
1025
/* Return the next input character without consuming it (0 at end). */
static char infix_peek(struct filter_parse_state *ps)
{
	if (ps->infix.tail == strlen(ps->infix.string))
		return 0;

	return ps->infix.string[ps->infix.tail];
}
1033
/* Skip one input character (no-op when input is exhausted). */
static void infix_advance(struct filter_parse_state *ps)
{
	if (!ps->infix.cnt)
		return;

	ps->infix.cnt--;
	ps->infix.tail++;
}
1042
/* True when operator @a binds less tightly than operator @b. */
static inline int is_precedence_lower(struct filter_parse_state *ps,
				      int a, int b)
{
	return ps->ops[a].precedence < ps->ops[b].precedence;
}
1048
/* True if @c can start an operator; the table ends at "OP_NONE". */
static inline int is_op_char(struct filter_parse_state *ps, char c)
{
	int i;

	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
		if (ps->ops[i].string[0] == c)
			return 1;
	}

	return 0;
}
1060
/*
 * Resolve the operator starting with @firstc.  A two-character form
 * (e.g. "<=", "&&") is tried first -- consuming the peeked character
 * on success -- then the single-character form ("<", "~", "(").
 * Returns the op id, or OP_NONE if no operator matches.
 */
static int infix_get_op(struct filter_parse_state *ps, char firstc)
{
	char nextc = infix_peek(ps);
	char opstr[3];
	int i;

	opstr[0] = firstc;
	opstr[1] = nextc;
	opstr[2] = '\0';

	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
		if (!strcmp(opstr, ps->ops[i].string)) {
			infix_advance(ps);
			return ps->ops[i].id;
		}
	}

	opstr[1] = '\0';

	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
		if (!strcmp(opstr, ps->ops[i].string))
			return ps->ops[i].id;
	}

	return OP_NONE;
}
1087
/* Reset the in-progress operand accumulator. */
static inline void clear_operand_string(struct filter_parse_state *ps)
{
	memset(ps->operand.string, '\0', MAX_FILTER_STR_VAL);
	ps->operand.tail = 0;
}
1093
/* Add one character to the operand buffer; -EINVAL when it would overflow. */
static inline int append_operand_char(struct filter_parse_state *ps, char c)
{
	if (ps->operand.tail == MAX_FILTER_STR_VAL - 1)
		return -EINVAL;

	ps->operand.string[ps->operand.tail++] = c;

	return 0;
}
1103
1104 static int filter_opstack_push(struct filter_parse_state *ps, int op)
1105 {
1106 struct opstack_op *opstack_op;
1107
1108 opstack_op = kmalloc(sizeof(*opstack_op), GFP_KERNEL);
1109 if (!opstack_op)
1110 return -ENOMEM;
1111
1112 opstack_op->op = op;
1113 list_add(&opstack_op->list, &ps->opstack);
1114
1115 return 0;
1116 }
1117
/* True when no operators are pending on the stack. */
static int filter_opstack_empty(struct filter_parse_state *ps)
{
	return list_empty(&ps->opstack);
}
1122
1123 static int filter_opstack_top(struct filter_parse_state *ps)
1124 {
1125 struct opstack_op *opstack_op;
1126
1127 if (filter_opstack_empty(ps))
1128 return OP_NONE;
1129
1130 opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
1131
1132 return opstack_op->op;
1133 }
1134
1135 static int filter_opstack_pop(struct filter_parse_state *ps)
1136 {
1137 struct opstack_op *opstack_op;
1138 int op;
1139
1140 if (filter_opstack_empty(ps))
1141 return OP_NONE;
1142
1143 opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
1144 op = opstack_op->op;
1145 list_del(&opstack_op->list);
1146
1147 kfree(opstack_op);
1148
1149 return op;
1150 }
1151
/* Discard every operator still pending on the stack. */
static void filter_opstack_clear(struct filter_parse_state *ps)
{
	while (!filter_opstack_empty(ps))
		filter_opstack_pop(ps);
}
1157
/* The operand text accumulated so far (empty string when none). */
static char *curr_operand(struct filter_parse_state *ps)
{
	return ps->operand.string;
}
1162
1163 static int postfix_append_operand(struct filter_parse_state *ps, char *operand)
1164 {
1165 struct postfix_elt *elt;
1166
1167 elt = kmalloc(sizeof(*elt), GFP_KERNEL);
1168 if (!elt)
1169 return -ENOMEM;
1170
1171 elt->op = OP_NONE;
1172 elt->operand = kstrdup(operand, GFP_KERNEL);
1173 if (!elt->operand) {
1174 kfree(elt);
1175 return -ENOMEM;
1176 }
1177
1178 list_add_tail(&elt->list, &ps->postfix);
1179
1180 return 0;
1181 }
1182
1183 static int postfix_append_op(struct filter_parse_state *ps, int op)
1184 {
1185 struct postfix_elt *elt;
1186
1187 elt = kmalloc(sizeof(*elt), GFP_KERNEL);
1188 if (!elt)
1189 return -ENOMEM;
1190
1191 elt->op = op;
1192 elt->operand = NULL;
1193
1194 list_add_tail(&elt->list, &ps->postfix);
1195
1196 return 0;
1197 }
1198
1199 static void postfix_clear(struct filter_parse_state *ps)
1200 {
1201 struct postfix_elt *elt;
1202
1203 while (!list_empty(&ps->postfix)) {
1204 elt = list_first_entry(&ps->postfix, struct postfix_elt, list);
1205 list_del(&elt->list);
1206 kfree(elt->operand);
1207 kfree(elt);
1208 }
1209 }
1210
/*
 * Convert the infix filter expression into postfix (RPN) form using
 * the shunting-yard algorithm: operands go straight to ps->postfix,
 * operators are held on ps->opstack until a lower-precedence operator
 * or a closing paren flushes them.  Double quotes protect operand
 * text from operator/whitespace handling.
 * NOTE(review): allocation failures from postfix_append_*() and
 * filter_opstack_push() are not checked here -- presumably tolerated
 * because a truncated postfix list fails later validation; confirm.
 */
static int filter_parse(struct filter_parse_state *ps)
{
	int in_string = 0;
	int op, top_op;
	char ch;

	while ((ch = infix_next(ps))) {
		if (ch == '"') {
			in_string ^= 1;
			continue;
		}

		if (in_string)
			goto parse_operand;

		if (isspace(ch))
			continue;

		if (is_op_char(ps, ch)) {
			op = infix_get_op(ps, ch);
			if (op == OP_NONE) {
				parse_error(ps, FILT_ERR_INVALID_OP, 0);
				return -EINVAL;
			}

			/* finish the operand that preceded this operator */
			if (strlen(curr_operand(ps))) {
				postfix_append_operand(ps, curr_operand(ps));
				clear_operand_string(ps);
			}

			/* flush stacked ops of equal or higher precedence */
			while (!filter_opstack_empty(ps)) {
				top_op = filter_opstack_top(ps);
				if (!is_precedence_lower(ps, top_op, op)) {
					top_op = filter_opstack_pop(ps);
					postfix_append_op(ps, top_op);
					continue;
				}
				break;
			}

			filter_opstack_push(ps, op);
			continue;
		}

		if (ch == '(') {
			filter_opstack_push(ps, OP_OPEN_PAREN);
			continue;
		}

		if (ch == ')') {
			if (strlen(curr_operand(ps))) {
				postfix_append_operand(ps, curr_operand(ps));
				clear_operand_string(ps);
			}

			/* pop until the matching open paren */
			top_op = filter_opstack_pop(ps);
			while (top_op != OP_NONE) {
				if (top_op == OP_OPEN_PAREN)
					break;
				postfix_append_op(ps, top_op);
				top_op = filter_opstack_pop(ps);
			}
			if (top_op == OP_NONE) {
				parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
				return -EINVAL;
			}
			continue;
		}
parse_operand:
		if (append_operand_char(ps, ch)) {
			parse_error(ps, FILT_ERR_OPERAND_TOO_LONG, 0);
			return -EINVAL;
		}
	}

	/* flush the trailing operand and any remaining operators */
	if (strlen(curr_operand(ps)))
		postfix_append_operand(ps, curr_operand(ps));

	while (!filter_opstack_empty(ps)) {
		top_op = filter_opstack_pop(ps);
		if (top_op == OP_NONE)
			break;
		if (top_op == OP_OPEN_PAREN) {
			parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
			return -EINVAL;
		}
		postfix_append_op(ps, top_op);
	}

	return 0;
}
1302
/*
 * Build a predicate for one postfix element.  Logical ops need no
 * operands; comparisons resolve @operand1 to an event field and store
 * @operand2 as the pattern/value, finished by init_pred().
 * Returns NULL on error with the reason recorded via parse_error().
 * NOTE(review): the returned pred is a function-local *static*, reused
 * on every call -- presumably safe because filter setup is serialized
 * (callers copy it via filter_set_pred); confirm the locking.
 */
static struct filter_pred *create_pred(struct filter_parse_state *ps,
				       struct ftrace_event_call *call,
				       int op, char *operand1, char *operand2)
{
	struct ftrace_event_field *field;
	static struct filter_pred pred;

	memset(&pred, 0, sizeof(pred));
	pred.op = op;

	if (op == OP_AND || op == OP_OR)
		return &pred;

	if (!operand1 || !operand2) {
		parse_error(ps, FILT_ERR_MISSING_FIELD, 0);
		return NULL;
	}

	field = trace_find_event_field(call, operand1);
	if (!field) {
		parse_error(ps, FILT_ERR_FIELD_NOT_FOUND, 0);
		return NULL;
	}

	strcpy(pred.regex.pattern, operand2);
	pred.regex.len = strlen(pred.regex.pattern);
	pred.field = field;
	return init_pred(ps, field, &pred) ? NULL : &pred;
}
1332
1333 static int check_preds(struct filter_parse_state *ps)
1334 {
1335 int n_normal_preds = 0, n_logical_preds = 0;
1336 struct postfix_elt *elt;
1337 int cnt = 0;
1338
1339 list_for_each_entry(elt, &ps->postfix, list) {
1340 if (elt->op == OP_NONE) {
1341 cnt++;
1342 continue;
1343 }
1344
1345 cnt--;
1346 if (elt->op == OP_AND || elt->op == OP_OR) {
1347 n_logical_preds++;
1348 continue;
1349 }
1350 n_normal_preds++;
1351 /* all ops should have operands */
1352 if (cnt < 0)
1353 break;
1354 }
1355
1356 if (cnt != 1 || !n_normal_preds || n_logical_preds >= n_normal_preds) {
1357 parse_error(ps, FILT_ERR_INVALID_FILTER, 0);
1358 return -EINVAL;
1359 }
1360
1361 return 0;
1362 }
1363
1364 static int count_preds(struct filter_parse_state *ps)
1365 {
1366 struct postfix_elt *elt;
1367 int n_preds = 0;
1368
1369 list_for_each_entry(elt, &ps->postfix, list) {
1370 if (elt->op == OP_NONE)
1371 continue;
1372 n_preds++;
1373 }
1374
1375 return n_preds;
1376 }
1377
/* Bookkeeping for check_pred_tree_cb(): abort the walk once the visit
 * count exceeds the precomputed bound, proving the tree doesn't terminate. */
struct check_pred_data {
	int count;	/* nodes visited so far */
	int max;	/* upper bound on legal visits (3 per predicate) */
};
1382
1383 static int check_pred_tree_cb(enum move_type move, struct filter_pred *pred,
1384 int *err, void *data)
1385 {
1386 struct check_pred_data *d = data;
1387
1388 if (WARN_ON(d->count++ > d->max)) {
1389 *err = -EINVAL;
1390 return WALK_PRED_ABORT;
1391 }
1392 return WALK_PRED_DEFAULT;
1393 }
1394
1395 /*
1396 * The tree is walked at filtering of an event. If the tree is not correctly
1397 * built, it may cause an infinite loop. Check here that the tree does
1398 * indeed terminate.
1399 */
1400 static int check_pred_tree(struct event_filter *filter,
1401 struct filter_pred *root)
1402 {
1403 struct check_pred_data data = {
1404 /*
1405 * The max that we can hit a node is three times.
1406 * Once going down, once coming up from left, and
1407 * once coming up from right. This is more than enough
1408 * since leafs are only hit a single time.
1409 */
1410 .max = 3 * filter->n_preds,
1411 .count = 0,
1412 };
1413
1414 return walk_pred_tree(filter->preds, root,
1415 check_pred_tree_cb, &data);
1416 }
1417
1418 static int count_leafs_cb(enum move_type move, struct filter_pred *pred,
1419 int *err, void *data)
1420 {
1421 int *count = data;
1422
1423 if ((move == MOVE_DOWN) &&
1424 (pred->left == FILTER_PRED_INVALID))
1425 (*count)++;
1426
1427 return WALK_PRED_DEFAULT;
1428 }
1429
1430 static int count_leafs(struct filter_pred *preds, struct filter_pred *root)
1431 {
1432 int count = 0, ret;
1433
1434 ret = walk_pred_tree(preds, root, count_leafs_cb, &count);
1435 WARN_ON(ret);
1436 return count;
1437 }
1438
/* State for fold_pred_cb(): collects the indices of @root's leaf
 * predicates into root->ops[], up to @children entries. */
struct fold_pred_data {
	struct filter_pred *root;	/* node whose ops[] is being filled */
	int count;			/* next free slot in root->ops[] */
	int children;			/* capacity of root->ops[] */
};
1444
/*
 * walk_pred_tree() callback for fold_pred(): record the index of every
 * leaf below the root into root->ops[].  Overflowing the preallocated
 * ops[] array (more leaves than count_leafs() promised) aborts the walk
 * with -EINVAL.
 */
static int fold_pred_cb(enum move_type move, struct filter_pred *pred,
			int *err, void *data)
{
	struct fold_pred_data *d = data;
	struct filter_pred *root = d->root;

	/* only act when first descending into a node */
	if (move != MOVE_DOWN)
		return WALK_PRED_DEFAULT;
	/* only leaves (no left child) are collected */
	if (pred->left != FILTER_PRED_INVALID)
		return WALK_PRED_DEFAULT;

	if (WARN_ON(d->count == d->children)) {
		*err = -EINVAL;
		return WALK_PRED_ABORT;
	}

	/* the fold mark is no longer needed once the leaf is collected */
	pred->index &= ~FILTER_PRED_FOLD;
	root->ops[d->count++] = pred->index;
	return WALK_PRED_DEFAULT;
}
1465
/*
 * Flatten the subtree below @root: allocate root->ops[] and fill it with
 * the indices of all leaf predicates so the group can later be evaluated
 * as a linear array instead of a tree walk.
 *
 * Returns 0 on success, -ENOMEM if the ops[] allocation fails, or the
 * error propagated from fold_pred_cb().
 */
static int fold_pred(struct filter_pred *preds, struct filter_pred *root)
{
	struct fold_pred_data data = {
		.root = root,
		.count = 0,
	};
	int children;

	/* No need to keep the fold flag */
	root->index &= ~FILTER_PRED_FOLD;

	/* If the root is a leaf then do nothing */
	if (root->left == FILTER_PRED_INVALID)
		return 0;

	/* count the children */
	children = count_leafs(preds, &preds[root->left]);
	children += count_leafs(preds, &preds[root->right]);

	root->ops = kcalloc(children, sizeof(*root->ops), GFP_KERNEL);
	if (!root->ops)
		return -ENOMEM;

	/* root->val holds the number of entries in root->ops[] */
	root->val = children;
	data.children = children;
	return walk_pred_tree(preds, root, fold_pred_cb, &data);
}
1493
1494 static int fold_pred_tree_cb(enum move_type move, struct filter_pred *pred,
1495 int *err, void *data)
1496 {
1497 struct filter_pred *preds = data;
1498
1499 if (move != MOVE_DOWN)
1500 return WALK_PRED_DEFAULT;
1501 if (!(pred->index & FILTER_PRED_FOLD))
1502 return WALK_PRED_DEFAULT;
1503
1504 *err = fold_pred(preds, pred);
1505 if (*err)
1506 return WALK_PRED_ABORT;
1507
1508 /* eveyrhing below is folded, continue with parent */
1509 return WALK_PRED_PARENT;
1510 }
1511
1512 /*
1513 * To optimize the processing of the ops, if we have several "ors" or
1514 * "ands" together, we can put them in an array and process them all
1515 * together speeding up the filter logic.
1516 */
1517 static int fold_pred_tree(struct event_filter *filter,
1518 struct filter_pred *root)
1519 {
1520 return walk_pred_tree(filter->preds, root, fold_pred_tree_cb,
1521 filter->preds);
1522 }
1523
1524 static int replace_preds(struct ftrace_event_call *call,
1525 struct event_filter *filter,
1526 struct filter_parse_state *ps,
1527 char *filter_string,
1528 bool dry_run)
1529 {
1530 char *operand1 = NULL, *operand2 = NULL;
1531 struct filter_pred *pred;
1532 struct filter_pred *root;
1533 struct postfix_elt *elt;
1534 struct pred_stack stack = { }; /* init to NULL */
1535 int err;
1536 int n_preds = 0;
1537
1538 n_preds = count_preds(ps);
1539 if (n_preds >= MAX_FILTER_PRED) {
1540 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1541 return -ENOSPC;
1542 }
1543
1544 err = check_preds(ps);
1545 if (err)
1546 return err;
1547
1548 if (!dry_run) {
1549 err = __alloc_pred_stack(&stack, n_preds);
1550 if (err)
1551 return err;
1552 err = __alloc_preds(filter, n_preds);
1553 if (err)
1554 goto fail;
1555 }
1556
1557 n_preds = 0;
1558 list_for_each_entry(elt, &ps->postfix, list) {
1559 if (elt->op == OP_NONE) {
1560 if (!operand1)
1561 operand1 = elt->operand;
1562 else if (!operand2)
1563 operand2 = elt->operand;
1564 else {
1565 parse_error(ps, FILT_ERR_TOO_MANY_OPERANDS, 0);
1566 err = -EINVAL;
1567 goto fail;
1568 }
1569 continue;
1570 }
1571
1572 if (WARN_ON(n_preds++ == MAX_FILTER_PRED)) {
1573 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1574 err = -ENOSPC;
1575 goto fail;
1576 }
1577
1578 pred = create_pred(ps, call, elt->op, operand1, operand2);
1579 if (!pred) {
1580 err = -EINVAL;
1581 goto fail;
1582 }
1583
1584 if (!dry_run) {
1585 err = filter_add_pred(ps, filter, pred, &stack);
1586 if (err)
1587 goto fail;
1588 }
1589
1590 operand1 = operand2 = NULL;
1591 }
1592
1593 if (!dry_run) {
1594 /* We should have one item left on the stack */
1595 pred = __pop_pred_stack(&stack);
1596 if (!pred)
1597 return -EINVAL;
1598 /* This item is where we start from in matching */
1599 root = pred;
1600 /* Make sure the stack is empty */
1601 pred = __pop_pred_stack(&stack);
1602 if (WARN_ON(pred)) {
1603 err = -EINVAL;
1604 filter->root = NULL;
1605 goto fail;
1606 }
1607 err = check_pred_tree(filter, root);
1608 if (err)
1609 goto fail;
1610
1611 /* Optimize the tree */
1612 err = fold_pred_tree(filter, root);
1613 if (err)
1614 goto fail;
1615
1616 /* We don't set root until we know it works */
1617 barrier();
1618 filter->root = root;
1619 }
1620
1621 err = 0;
1622 fail:
1623 __free_pred_stack(&stack);
1624 return err;
1625 }
1626
/* One entry in the temporary list of per-event filters built while a
 * subsystem-wide filter is applied (see replace_system_preds()). */
struct filter_list {
	struct list_head list;
	struct event_filter *filter;
};
1631
/*
 * Apply @filter_string to every event of @system.
 *
 * Pass 1 dry-runs the filter against each event and marks events it
 * cannot apply to with TRACE_EVENT_FL_NO_SET_FILTER.  Pass 2 builds a
 * real filter for each remaining event and swaps it in under RCU; the
 * displaced filters are collected on a local list and freed only after
 * synchronize_sched(), since readers may still be using them.
 *
 * Returns 0 if at least one event accepted the filter, -EINVAL if none
 * did, -ENOMEM on allocation failure.
 */
static int replace_system_preds(struct event_subsystem *system,
				struct filter_parse_state *ps,
				char *filter_string)
{
	struct ftrace_event_call *call;
	struct filter_list *filter_item;
	struct filter_list *tmp;
	LIST_HEAD(filter_list);
	bool fail = true;
	int err;

	/* pass 1: mark which events the filter can apply to */
	list_for_each_entry(call, &ftrace_events, list) {

		if (strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * Try to see if the filter can be applied
		 * (filter arg is ignored on dry_run)
		 */
		err = replace_preds(call, NULL, ps, filter_string, true);
		if (err)
			call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
		else
			call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
	}

	/* pass 2: build and install the real filters */
	list_for_each_entry(call, &ftrace_events, list) {
		struct event_filter *filter;

		if (strcmp(call->class->system, system->name) != 0)
			continue;

		if (call->flags & TRACE_EVENT_FL_NO_SET_FILTER)
			continue;

		filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
		if (!filter_item)
			goto fail_mem;

		list_add_tail(&filter_item->list, &filter_list);

		filter_item->filter = __alloc_filter();
		if (!filter_item->filter)
			goto fail_mem;
		filter = filter_item->filter;

		/* Can only fail on no memory */
		err = replace_filter_string(filter, filter_string);
		if (err)
			goto fail_mem;

		err = replace_preds(call, filter, ps, filter_string, false);
		if (err) {
			filter_disable(call);
			parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
			append_filter_err(ps, filter);
		} else
			call->flags |= TRACE_EVENT_FL_FILTERED;
		/*
		 * Regardless of if this returned an error, we still
		 * replace the filter for the call.
		 */
		filter = call->filter;
		rcu_assign_pointer(call->filter, filter_item->filter);
		filter_item->filter = filter;

		fail = false;
	}

	if (fail)
		goto fail;

	/*
	 * The calls can still be using the old filters.
	 * Do a synchronize_sched() to ensure all calls are
	 * done with them before we free them.
	 */
	synchronize_sched();
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		__free_filter(filter_item->filter);
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	return 0;
 fail:
	/* No call succeeded */
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
	return -EINVAL;
 fail_mem:
	/* If any call succeeded, we still need to sync */
	if (!fail)
		synchronize_sched();
	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
		__free_filter(filter_item->filter);
		list_del(&filter_item->list);
		kfree(filter_item);
	}
	return -ENOMEM;
}
1736
/*
 * Common setup for filter creation: allocate the filter and parse state,
 * optionally record @filter_str in the filter, and run the string parser.
 *
 * On allocation failure everything is freed and -ENOMEM is returned
 * with *psp/*filterp untouched.  Once allocation succeeds, *psp and
 * *filterp are always set — even if parsing then fails — and the caller
 * owns both (clean up via create_filter_finish() plus the caller's own
 * filter error path).
 */
static int create_filter_start(char *filter_str, bool set_str,
			       struct filter_parse_state **psp,
			       struct event_filter **filterp)
{
	struct event_filter *filter;
	struct filter_parse_state *ps = NULL;
	int err = 0;

	WARN_ON_ONCE(*psp || *filterp);

	/* allocate everything, and if any fails, free all and fail */
	filter = __alloc_filter();
	if (filter && set_str)
		err = replace_filter_string(filter, filter_str);

	ps = kzalloc(sizeof(*ps), GFP_KERNEL);

	/* replace_filter_string() can only fail on no memory, so -ENOMEM
	 * covers all three failure causes here */
	if (!filter || !ps || err) {
		kfree(ps);
		__free_filter(filter);
		return -ENOMEM;
	}

	/* we're committed to creating a new filter */
	*filterp = filter;
	*psp = ps;

	parse_init(ps, filter_ops, filter_str);
	err = filter_parse(ps);
	if (err && set_str)
		append_filter_err(ps, filter);
	return err;
}
1770
/* Tear down the parse state allocated by create_filter_start(). */
static void create_filter_finish(struct filter_parse_state *ps)
{
	if (!ps)
		return;

	filter_opstack_clear(ps);
	postfix_clear(ps);
	kfree(ps);
}
1779
1780 /**
1781 * create_filter - create a filter for a ftrace_event_call
1782 * @call: ftrace_event_call to create a filter for
1783 * @filter_str: filter string
1784 * @set_str: remember @filter_str and enable detailed error in filter
1785 * @filterp: out param for created filter (always updated on return)
1786 *
1787 * Creates a filter for @call with @filter_str. If @set_str is %true,
1788 * @filter_str is copied and recorded in the new filter.
1789 *
1790 * On success, returns 0 and *@filterp points to the new filter. On
1791 * failure, returns -errno and *@filterp may point to %NULL or to a new
1792 * filter. In the latter case, the returned filter contains error
1793 * information if @set_str is %true and the caller is responsible for
1794 * freeing it.
1795 */
1796 static int create_filter(struct ftrace_event_call *call,
1797 char *filter_str, bool set_str,
1798 struct event_filter **filterp)
1799 {
1800 struct event_filter *filter = NULL;
1801 struct filter_parse_state *ps = NULL;
1802 int err;
1803
1804 err = create_filter_start(filter_str, set_str, &ps, &filter);
1805 if (!err) {
1806 err = replace_preds(call, filter, ps, filter_str, false);
1807 if (err && set_str)
1808 append_filter_err(ps, filter);
1809 }
1810 create_filter_finish(ps);
1811
1812 *filterp = filter;
1813 return err;
1814 }
1815
1816 /**
1817 * create_system_filter - create a filter for an event_subsystem
1818 * @system: event_subsystem to create a filter for
1819 * @filter_str: filter string
1820 * @filterp: out param for created filter (always updated on return)
1821 *
1822 * Identical to create_filter() except that it creates a subsystem filter
1823 * and always remembers @filter_str.
1824 */
1825 static int create_system_filter(struct event_subsystem *system,
1826 char *filter_str, struct event_filter **filterp)
1827 {
1828 struct event_filter *filter = NULL;
1829 struct filter_parse_state *ps = NULL;
1830 int err;
1831
1832 err = create_filter_start(filter_str, true, &ps, &filter);
1833 if (!err) {
1834 err = replace_system_preds(system, ps, filter_str);
1835 if (!err) {
1836 /* System filters just show a default message */
1837 kfree(filter->filter_string);
1838 filter->filter_string = NULL;
1839 } else {
1840 append_filter_err(ps, filter);
1841 }
1842 }
1843 create_filter_finish(ps);
1844
1845 *filterp = filter;
1846 return err;
1847 }
1848
/*
 * Set or clear the filter of a single event from @filter_string.
 * Writing "0" clears the filter.  The old filter (if any) is only freed
 * after synchronize_sched(), since tracing may still be reading it.
 *
 * caller must hold event_mutex
 */
int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
{
	struct event_filter *filter;
	int err;

	/* "0" means: remove the current filter */
	if (!strcmp(strstrip(filter_string), "0")) {
		filter_disable(call);
		filter = call->filter;
		if (!filter)
			return 0;
		RCU_INIT_POINTER(call->filter, NULL);
		/* Make sure the filter is not being used */
		synchronize_sched();
		__free_filter(filter);
		return 0;
	}

	err = create_filter(call, filter_string, true, &filter);

	/*
	 * Always swap the call filter with the new filter
	 * even if there was an error. If there was an error
	 * in the filter, we disable the filter and show the error
	 * string
	 */
	if (filter) {
		struct event_filter *tmp = call->filter;

		if (!err)
			call->flags |= TRACE_EVENT_FL_FILTERED;
		else
			filter_disable(call);

		rcu_assign_pointer(call->filter, filter);

		if (tmp) {
			/* Make sure the call is done with the filter */
			synchronize_sched();
			__free_filter(tmp);
		}
	}

	return err;
}
1894
/*
 * Set or clear the subsystem-wide filter of @dir from @filter_string.
 * Writing "0" clears the subsystem filter and every per-event filter it
 * installed.  Serialized by event_mutex.
 */
int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
				 char *filter_string)
{
	struct event_subsystem *system = dir->subsystem;
	struct event_filter *filter;
	int err = 0;

	mutex_lock(&event_mutex);

	/* Make sure the system still has events */
	if (!dir->nr_events) {
		err = -ENODEV;
		goto out_unlock;
	}

	/* "0" means: remove the current filter */
	if (!strcmp(strstrip(filter_string), "0")) {
		filter_free_subsystem_preds(system);
		remove_filter_string(system->filter);
		filter = system->filter;
		system->filter = NULL;
		/* Ensure all filters are no longer used */
		synchronize_sched();
		filter_free_subsystem_filters(system);
		__free_filter(filter);
		goto out_unlock;
	}

	err = create_system_filter(system, filter_string, &filter);
	if (filter) {
		/*
		 * No event actually uses the system filter
		 * we can free it without synchronize_sched().
		 */
		__free_filter(system->filter);
		system->filter = filter;
	}
out_unlock:
	mutex_unlock(&event_mutex);

	return err;
}
1936
1937 #ifdef CONFIG_PERF_EVENTS
1938
1939 void ftrace_profile_free_filter(struct perf_event *event)
1940 {
1941 struct event_filter *filter = event->filter;
1942
1943 event->filter = NULL;
1944 __free_filter(filter);
1945 }
1946
/* State shared while translating a function-filter predicate tree into
 * ftrace_set_filter()/ftrace_set_notrace() calls. */
struct function_filter_data {
	struct ftrace_ops *ops;	/* ops whose filter/notrace lists are set */
	int first_filter;	/* reset the filter list on first pattern */
	int first_notrace;	/* reset the notrace list on first pattern */
};
1952
1953 #ifdef CONFIG_FUNCTION_TRACER
1954 static char **
1955 ftrace_function_filter_re(char *buf, int len, int *count)
1956 {
1957 char *str, *sep, **re;
1958
1959 str = kstrndup(buf, len, GFP_KERNEL);
1960 if (!str)
1961 return NULL;
1962
1963 /*
1964 * The argv_split function takes white space
1965 * as a separator, so convert ',' into spaces.
1966 */
1967 while ((sep = strchr(str, ',')))
1968 *sep = ' ';
1969
1970 re = argv_split(GFP_KERNEL, str, count);
1971 kfree(str);
1972 return re;
1973 }
1974
/* Route one pattern to the filter or the notrace list of @ops. */
static int ftrace_function_set_regexp(struct ftrace_ops *ops, int filter,
				      int reset, char *re, int len)
{
	return filter ? ftrace_set_filter(ops, re, len, reset)
		      : ftrace_set_notrace(ops, re, len, reset);
}
1987
/*
 * Apply one 'ip' operand (a pattern list) to the ftrace ops.  The first
 * successfully applied pattern of each kind resets the corresponding
 * filter/notrace list; later ones append.
 */
static int __ftrace_function_set_filter(int filter, char *buf, int len,
					struct function_filter_data *data)
{
	int i, re_cnt, ret = -EINVAL;
	int *reset;
	char **re;

	reset = filter ? &data->first_filter : &data->first_notrace;

	/*
	 * The 'ip' field could have multiple filters set, separated
	 * either by space or comma. We first cut the filter and apply
	 * all pieces separately.
	 */
	re = ftrace_function_filter_re(buf, len, &re_cnt);
	if (!re)
		return -EINVAL;

	for (i = 0; i < re_cnt; i++) {
		ret = ftrace_function_set_regexp(data->ops, filter, *reset,
						 re[i], strlen(re[i]));
		if (ret)
			break;

		/* only reset the list once */
		if (*reset)
			*reset = 0;
	}

	argv_free(re);
	return ret;
}
2019
2020 static int ftrace_function_check_pred(struct filter_pred *pred, int leaf)
2021 {
2022 struct ftrace_event_field *field = pred->field;
2023
2024 if (leaf) {
2025 /*
2026 * Check the leaf predicate for function trace, verify:
2027 * - only '==' and '!=' is used
2028 * - the 'ip' field is used
2029 */
2030 if ((pred->op != OP_EQ) && (pred->op != OP_NE))
2031 return -EINVAL;
2032
2033 if (strcmp(field->name, "ip"))
2034 return -EINVAL;
2035 } else {
2036 /*
2037 * Check the non leaf predicate for function trace, verify:
2038 * - only '||' is used
2039 */
2040 if (pred->op != OP_OR)
2041 return -EINVAL;
2042 }
2043
2044 return 0;
2045 }
2046
/*
 * walk_pred_tree() callback: validate each node for function tracing
 * and, for leaves, apply the 'ip' pattern to the ftrace ops ('==' maps
 * to the filter list, '!=' to the notrace list).
 */
static int ftrace_function_set_filter_cb(enum move_type move,
					 struct filter_pred *pred,
					 int *err, void *data)
{
	/* Checking the node is valid for function trace. */
	if ((move != MOVE_DOWN) ||
	    (pred->left != FILTER_PRED_INVALID)) {
		/* interior node (or revisit on the way up) */
		*err = ftrace_function_check_pred(pred, 0);
	} else {
		/* leaf, first visit: validate then apply the pattern */
		*err = ftrace_function_check_pred(pred, 1);
		if (*err)
			return WALK_PRED_ABORT;

		*err = __ftrace_function_set_filter(pred->op == OP_EQ,
						    pred->regex.pattern,
						    pred->regex.len,
						    data);
	}

	return (*err) ? WALK_PRED_ABORT : WALK_PRED_DEFAULT;
}
2068
2069 static int ftrace_function_set_filter(struct perf_event *event,
2070 struct event_filter *filter)
2071 {
2072 struct function_filter_data data = {
2073 .first_filter = 1,
2074 .first_notrace = 1,
2075 .ops = &event->ftrace_ops,
2076 };
2077
2078 return walk_pred_tree(filter->preds, filter->root,
2079 ftrace_function_set_filter_cb, &data);
2080 }
2081 #else
/* Function tracing not built in: function filters cannot be set. */
static int ftrace_function_set_filter(struct perf_event *event,
				      struct event_filter *filter)
{
	return -ENODEV;
}
2087 #endif /* CONFIG_FUNCTION_TRACER */
2088
/*
 * Attach a filter, built from @filter_str, to a perf event.  For
 * function events the filter is translated into ftrace filter/notrace
 * lists and the event_filter itself is freed; otherwise the filter is
 * kept on the event.  Fails with -EEXIST if a filter is already set.
 */
int ftrace_profile_set_filter(struct perf_event *event, int event_id,
			      char *filter_str)
{
	int err;
	struct event_filter *filter;
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);

	call = event->tp_event;

	err = -EINVAL;
	if (!call)
		goto out_unlock;

	err = -EEXIST;
	if (event->filter)
		goto out_unlock;

	/* create_filter() may set filter even on error; see free_filter */
	err = create_filter(call, filter_str, false, &filter);
	if (err)
		goto free_filter;

	if (ftrace_event_is_function(call))
		err = ftrace_function_set_filter(event, filter);
	else
		event->filter = filter;

free_filter:
	/* the filter is not kept for errors or function events */
	if (err || ftrace_event_is_function(call))
		__free_filter(filter);

out_unlock:
	mutex_unlock(&event_mutex);

	return err;
}
2126
2127 #endif /* CONFIG_PERF_EVENTS */
2128
2129 #ifdef CONFIG_FTRACE_STARTUP_TEST
2130
2131 #include <linux/types.h>
2132 #include <linux/tracepoint.h>
2133
2134 #define CREATE_TRACE_POINTS
2135 #include "trace_events_filter_test.h"
2136
/*
 * DATA_REC() builds one startup self-test case: the filter string under
 * test (FILTER is redefined before each group below), a sample record
 * with field values a..h, the expected match result, and the list of
 * fields whose predicates must NOT be visited thanks to short-circuit
 * evaluation of the folded tree.
 */
#define DATA_REC(m, va, vb, vc, vd, ve, vf, vg, vh, nvisit) \
{ \
	.filter = FILTER, \
	.rec    = { .a = va, .b = vb, .c = vc, .d = vd, \
		    .e = ve, .f = vf, .g = vg, .h = vh }, \
	.match  = m, \
	.not_visited = nvisit, \
}
#define YES 1
#define NO  0

/* Table of filter self-test cases, exercised by ftrace_test_event_filter(). */
static struct test_filter_data_t {
	char *filter;
	struct ftrace_raw_ftrace_test_filter rec;
	int match;
	char *not_visited;
} test_filter_data[] = {
#define FILTER "a == 1 && b == 1 && c == 1 && d == 1 && " \
	       "e == 1 && f == 1 && g == 1 && h == 1"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, ""),
	DATA_REC(NO, 0, 1, 1, 1, 1, 1, 1, 1, "bcdefgh"),
	DATA_REC(NO, 1, 1, 1, 1, 1, 1, 1, 0, ""),
#undef FILTER
#define FILTER "a == 1 || b == 1 || c == 1 || d == 1 || " \
	       "e == 1 || f == 1 || g == 1 || h == 1"
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""),
	DATA_REC(YES, 0, 0, 0, 0, 0, 0, 0, 1, ""),
	DATA_REC(YES, 1, 0, 0, 0, 0, 0, 0, 0, "bcdefgh"),
#undef FILTER
#define FILTER "(a == 1 || b == 1) && (c == 1 || d == 1) && " \
	       "(e == 1 || f == 1) && (g == 1 || h == 1)"
	DATA_REC(NO, 0, 0, 1, 1, 1, 1, 1, 1, "dfh"),
	DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(YES, 1, 0, 1, 0, 0, 1, 0, 1, "bd"),
	DATA_REC(NO, 1, 0, 1, 0, 0, 1, 0, 0, "bd"),
#undef FILTER
#define FILTER "(a == 1 && b == 1) || (c == 1 && d == 1) || " \
	       "(e == 1 && f == 1) || (g == 1 && h == 1)"
	DATA_REC(YES, 1, 0, 1, 1, 1, 1, 1, 1, "efgh"),
	DATA_REC(YES, 0, 0, 0, 0, 0, 0, 1, 1, ""),
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""),
#undef FILTER
#define FILTER "(a == 1 && b == 1) && (c == 1 && d == 1) && " \
	       "(e == 1 && f == 1) || (g == 1 && h == 1)"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 0, "gh"),
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 1, ""),
	DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, ""),
#undef FILTER
#define FILTER "((a == 1 || b == 1) || (c == 1 || d == 1) || " \
	       "(e == 1 || f == 1)) && (g == 1 || h == 1)"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 1, "bcdef"),
	DATA_REC(NO, 0, 0, 0, 0, 0, 0, 0, 0, ""),
	DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, "h"),
#undef FILTER
#define FILTER "((((((((a == 1) && (b == 1)) || (c == 1)) && (d == 1)) || " \
	       "(e == 1)) && (f == 1)) || (g == 1)) && (h == 1))"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "ceg"),
	DATA_REC(NO, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(NO, 1, 0, 1, 0, 1, 0, 1, 0, ""),
#undef FILTER
#define FILTER "((((((((a == 1) || (b == 1)) && (c == 1)) || (d == 1)) && " \
	       "(e == 1)) || (f == 1)) && (g == 1)) || (h == 1))"
	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "bdfh"),
	DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
	DATA_REC(YES, 1, 0, 1, 0, 1, 0, 1, 0, "bdfh"),
};

#undef DATA_REC
#undef FILTER
#undef YES
#undef NO
2208
/* number of self-test cases in test_filter_data[] */
#define DATA_CNT (sizeof(test_filter_data)/sizeof(struct test_filter_data_t))

/* set by test_pred_visited_fn() when a supposedly-skipped pred runs */
static int test_pred_visited;
2212
2213 static int test_pred_visited_fn(struct filter_pred *pred, void *event)
2214 {
2215 struct ftrace_event_field *field = pred->field;
2216
2217 test_pred_visited = 1;
2218 printk(KERN_INFO "\npred visited %s\n", field->name);
2219 return 1;
2220 }
2221
2222 static int test_walk_pred_cb(enum move_type move, struct filter_pred *pred,
2223 int *err, void *data)
2224 {
2225 char *fields = data;
2226
2227 if ((move == MOVE_DOWN) &&
2228 (pred->left == FILTER_PRED_INVALID)) {
2229 struct ftrace_event_field *field = pred->field;
2230
2231 if (!field) {
2232 WARN(1, "all leafs should have field defined");
2233 return WALK_PRED_DEFAULT;
2234 }
2235 if (!strchr(fields, *field->name))
2236 return WALK_PRED_DEFAULT;
2237
2238 WARN_ON(!pred->fn);
2239 pred->fn = test_pred_visited_fn;
2240 }
2241 return WALK_PRED_DEFAULT;
2242 }
2243
/*
 * Startup self-test: build each filter in test_filter_data[], run it
 * against the sample record, and verify both the match result and that
 * short-circuited predicates were never evaluated.
 */
static __init int ftrace_test_event_filter(void)
{
	int i;

	printk(KERN_INFO "Testing ftrace filter: ");

	for (i = 0; i < DATA_CNT; i++) {
		struct event_filter *filter = NULL;
		struct test_filter_data_t *d = &test_filter_data[i];
		int err;

		err = create_filter(&event_ftrace_test_filter, d->filter,
				    false, &filter);
		if (err) {
			printk(KERN_INFO
			       "Failed to get filter for '%s', err %d\n",
			       d->filter, err);
			__free_filter(filter);
			break;
		}

		/*
		 * The preemption disabling is not really needed for self
		 * tests, but the rcu dereference will complain without it.
		 */
		preempt_disable();
		if (*d->not_visited)
			walk_pred_tree(filter->preds, filter->root,
				       test_walk_pred_cb,
				       d->not_visited);

		test_pred_visited = 0;
		err = filter_match_preds(filter, &d->rec);
		preempt_enable();

		__free_filter(filter);

		/* a hooked pred ran even though it should have been skipped */
		if (test_pred_visited) {
			printk(KERN_INFO
			       "Failed, unwanted pred visited for filter %s\n",
			       d->filter);
			break;
		}

		if (err != d->match) {
			printk(KERN_INFO
			       "Failed to match filter '%s', expected %d\n",
			       d->filter, d->match);
			break;
		}
	}

	/* only print OK if every case ran to completion */
	if (i == DATA_CNT)
		printk(KERN_CONT "OK\n");

	return 0;
}
2301
2302 late_initcall(ftrace_test_event_filter);
2303
2304 #endif /* CONFIG_FTRACE_STARTUP_TEST */