kernel/trace/trace_kprobe.c (from the mt8127/android_kernel_alcatel_ttab tree, merge tag v3.10.55)
1 /*
2 * Kprobes-based tracing events
3 *
4 * Created by Masami Hiramatsu <mhiramat@redhat.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20 #include <linux/module.h>
21 #include <linux/uaccess.h>
22
23 #include "trace_probe.h"
24
25 #define KPROBE_EVENT_SYSTEM "kprobes"
26
27 /*
28 * Kprobe event core functions
29 */
30 struct trace_probe {
31 struct list_head list;
32 struct kretprobe rp; /* Use rp.kp for kprobe use */
33 unsigned long nhit;
34 unsigned int flags; /* For TP_FLAG_* */
35 const char *symbol; /* symbol name */
36 struct ftrace_event_class class;
37 struct ftrace_event_call call;
38 struct ftrace_event_file * __rcu *files;
39 ssize_t size; /* trace entry size */
40 unsigned int nr_args;
41 struct probe_arg args[];
42 };
43
44 #define SIZEOF_TRACE_PROBE(n) \
45 (offsetof(struct trace_probe, args) + \
46 (sizeof(struct probe_arg) * (n)))
47
48
49 static __kprobes bool trace_probe_is_return(struct trace_probe *tp)
50 {
51 return tp->rp.handler != NULL;
52 }
53
54 static __kprobes const char *trace_probe_symbol(struct trace_probe *tp)
55 {
56 return tp->symbol ? tp->symbol : "unknown";
57 }
58
59 static __kprobes unsigned long trace_probe_offset(struct trace_probe *tp)
60 {
61 return tp->rp.kp.offset;
62 }
63
64 static __kprobes bool trace_probe_is_enabled(struct trace_probe *tp)
65 {
66 return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE));
67 }
68
69 static __kprobes bool trace_probe_is_registered(struct trace_probe *tp)
70 {
71 return !!(tp->flags & TP_FLAG_REGISTERED);
72 }
73
74 static __kprobes bool trace_probe_has_gone(struct trace_probe *tp)
75 {
76 return !!(kprobe_gone(&tp->rp.kp));
77 }
78
79 static __kprobes bool trace_probe_within_module(struct trace_probe *tp,
80 struct module *mod)
81 {
82 int len = strlen(mod->name);
83 const char *name = trace_probe_symbol(tp);
84 return strncmp(mod->name, name, len) == 0 && name[len] == ':';
85 }
86
87 static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp)
88 {
89 return !!strchr(trace_probe_symbol(tp), ':');
90 }
91
92 static int register_probe_event(struct trace_probe *tp);
93 static int unregister_probe_event(struct trace_probe *tp);
94
95 static DEFINE_MUTEX(probe_lock);
96 static LIST_HEAD(probe_list);
97
98 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
99 static int kretprobe_dispatcher(struct kretprobe_instance *ri,
100 struct pt_regs *regs);
101
102 /*
103 * Allocate new trace_probe and initialize it (including kprobes).
104 */
105 static struct trace_probe *alloc_trace_probe(const char *group,
106 const char *event,
107 void *addr,
108 const char *symbol,
109 unsigned long offs,
110 int nargs, bool is_return)
111 {
112 struct trace_probe *tp;
113 int ret = -ENOMEM;
114
115 tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL);
116 if (!tp)
117 return ERR_PTR(ret);
118
119 if (symbol) {
120 tp->symbol = kstrdup(symbol, GFP_KERNEL);
121 if (!tp->symbol)
122 goto error;
123 tp->rp.kp.symbol_name = tp->symbol;
124 tp->rp.kp.offset = offs;
125 } else
126 tp->rp.kp.addr = addr;
127
128 if (is_return)
129 tp->rp.handler = kretprobe_dispatcher;
130 else
131 tp->rp.kp.pre_handler = kprobe_dispatcher;
132
133 if (!event || !is_good_name(event)) {
134 ret = -EINVAL;
135 goto error;
136 }
137
138 tp->call.class = &tp->class;
139 tp->call.name = kstrdup(event, GFP_KERNEL);
140 if (!tp->call.name)
141 goto error;
142
143 if (!group || !is_good_name(group)) {
144 ret = -EINVAL;
145 goto error;
146 }
147
148 tp->class.system = kstrdup(group, GFP_KERNEL);
149 if (!tp->class.system)
150 goto error;
151
152 INIT_LIST_HEAD(&tp->list);
153 return tp;
154 error:
155 kfree(tp->call.name);
156 kfree(tp->symbol);
157 kfree(tp);
158 return ERR_PTR(ret);
159 }
160
161 static void free_trace_probe(struct trace_probe *tp)
162 {
163 int i;
164
165 for (i = 0; i < tp->nr_args; i++)
166 traceprobe_free_probe_arg(&tp->args[i]);
167
168 kfree(tp->call.class->system);
169 kfree(tp->call.name);
170 kfree(tp->symbol);
171 kfree(tp);
172 }
173
174 static struct trace_probe *find_trace_probe(const char *event,
175 const char *group)
176 {
177 struct trace_probe *tp;
178
179 list_for_each_entry(tp, &probe_list, list)
180 if (strcmp(tp->call.name, event) == 0 &&
181 strcmp(tp->call.class->system, group) == 0)
182 return tp;
183 return NULL;
184 }
185
186 static int trace_probe_nr_files(struct trace_probe *tp)
187 {
188 struct ftrace_event_file **file;
189 int ret = 0;
190
191 /*
192 * Since all updaters of tp->files are protected by probe_enable_lock,
193 * we don't need to take rcu_read_lock here.
194 */
195 file = rcu_dereference_raw(tp->files);
196 if (file)
197 while (*(file++))
198 ret++;
199
200 return ret;
201 }
202
203 static DEFINE_MUTEX(probe_enable_lock);
204
205 /*
206 * Enable trace_probe
207 * If file is NULL, enable the "perf" handler; otherwise enable the "trace" handler.
208 */
209 static int
210 enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
211 {
212 int ret = 0;
213
214 mutex_lock(&probe_enable_lock);
215
216 if (file) {
217 struct ftrace_event_file **new, **old;
218 int n = trace_probe_nr_files(tp);
219
220 old = rcu_dereference_raw(tp->files);
221 /* One slot is for the new file and one for the NULL stopper */
222 new = kzalloc((n + 2) * sizeof(struct ftrace_event_file *),
223 GFP_KERNEL);
224 if (!new) {
225 ret = -ENOMEM;
226 goto out_unlock;
227 }
228 memcpy(new, old, n * sizeof(struct ftrace_event_file *));
229 new[n] = file;
230 /* The last slot stays NULL as the stopper */
231
232 rcu_assign_pointer(tp->files, new);
233 tp->flags |= TP_FLAG_TRACE;
234
235 if (old) {
236 /* Make sure the probe is done with old files */
237 synchronize_sched();
238 kfree(old);
239 }
240 } else
241 tp->flags |= TP_FLAG_PROFILE;
242
243 if (trace_probe_is_enabled(tp) && trace_probe_is_registered(tp) &&
244 !trace_probe_has_gone(tp)) {
245 if (trace_probe_is_return(tp))
246 ret = enable_kretprobe(&tp->rp);
247 else
248 ret = enable_kprobe(&tp->rp.kp);
249 }
250
251 out_unlock:
252 mutex_unlock(&probe_enable_lock);
253
254 return ret;
255 }
256
257 static int
258 trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file)
259 {
260 struct ftrace_event_file **files;
261 int i;
262
263 /*
264 * Since all updaters of tp->files are protected by probe_enable_lock,
265 * we don't need to take rcu_read_lock here.
266 */
267 files = rcu_dereference_raw(tp->files);
268 if (files) {
269 for (i = 0; files[i]; i++)
270 if (files[i] == file)
271 return i;
272 }
273
274 return -1;
275 }
276
277 /*
278 * Disable trace_probe
279 * If file is NULL, disable the "perf" handler; otherwise disable the "trace" handler.
280 */
281 static int
282 disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
283 {
284 struct ftrace_event_file **old = NULL;
285 int wait = 0;
286 int ret = 0;
287
288 mutex_lock(&probe_enable_lock);
289
290 if (file) {
291 struct ftrace_event_file **new, **old;
292 int n = trace_probe_nr_files(tp);
293 int i, j;
294
295 old = rcu_dereference_raw(tp->files);
296 if (n == 0 || trace_probe_file_index(tp, file) < 0) {
297 ret = -EINVAL;
298 goto out_unlock;
299 }
300
301 if (n == 1) { /* Remove the last file */
302 tp->flags &= ~TP_FLAG_TRACE;
303 new = NULL;
304 } else {
305 new = kzalloc(n * sizeof(struct ftrace_event_file *),
306 GFP_KERNEL);
307 if (!new) {
308 ret = -ENOMEM;
309 goto out_unlock;
310 }
311
312 /* This copy & check loop copies the NULL stopper too */
313 for (i = 0, j = 0; j < n && i < n + 1; i++)
314 if (old[i] != file)
315 new[j++] = old[i];
316 }
317
318 rcu_assign_pointer(tp->files, new);
319 wait = 1;
320 } else
321 tp->flags &= ~TP_FLAG_PROFILE;
322
323 if (!trace_probe_is_enabled(tp) && trace_probe_is_registered(tp)) {
324 if (trace_probe_is_return(tp))
325 disable_kretprobe(&tp->rp);
326 else
327 disable_kprobe(&tp->rp.kp);
328 wait = 1;
329 }
330
331 out_unlock:
332 mutex_unlock(&probe_enable_lock);
333
334 if (wait) {
335 /*
336 * Synchronize with kprobe_trace_func/kretprobe_trace_func to ensure
337 * the probe is really disabled (all running handlers have finished).
338 * This is not only for the kfree() below: the caller,
339 * trace_remove_event_call(), relies on it before releasing
340 * event_call related objects, which may still be accessed from
341 * kprobe_trace_func/kretprobe_trace_func.
342 */
343 synchronize_sched();
344 kfree(old); /* Ignored if old == NULL */
345 }
346
347 return ret;
348 }
349
350 /* Internal register function - just handle k*probes and flags */
351 static int __register_trace_probe(struct trace_probe *tp)
352 {
353 int i, ret;
354
355 if (trace_probe_is_registered(tp))
356 return -EINVAL;
357
358 for (i = 0; i < tp->nr_args; i++)
359 traceprobe_update_arg(&tp->args[i]);
360
361 /* Set/clear disabled flag according to tp->flags */
362 if (trace_probe_is_enabled(tp))
363 tp->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
364 else
365 tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;
366
367 if (trace_probe_is_return(tp))
368 ret = register_kretprobe(&tp->rp);
369 else
370 ret = register_kprobe(&tp->rp.kp);
371
372 if (ret == 0)
373 tp->flags |= TP_FLAG_REGISTERED;
374 else {
375 pr_warning("Could not insert probe at %s+%lu: %d\n",
376 trace_probe_symbol(tp), trace_probe_offset(tp), ret);
377 if (ret == -ENOENT && trace_probe_is_on_module(tp)) {
378 pr_warning("This probe might be able to register after"
379 "target module is loaded. Continue.\n");
380 ret = 0;
381 } else if (ret == -EILSEQ) {
382 pr_warning("Probing address(0x%p) is not an "
383 "instruction boundary.\n",
384 tp->rp.kp.addr);
385 ret = -EINVAL;
386 }
387 }
388
389 return ret;
390 }
391
392 /* Internal unregister function - just handle k*probes and flags */
393 static void __unregister_trace_probe(struct trace_probe *tp)
394 {
395 if (trace_probe_is_registered(tp)) {
396 if (trace_probe_is_return(tp))
397 unregister_kretprobe(&tp->rp);
398 else
399 unregister_kprobe(&tp->rp.kp);
400 tp->flags &= ~TP_FLAG_REGISTERED;
401 /* Cleanup kprobe for reuse */
402 if (tp->rp.kp.symbol_name)
403 tp->rp.kp.addr = NULL;
404 }
405 }
406
407 /* Unregister a trace_probe and probe_event: call with locking probe_lock */
408 static int unregister_trace_probe(struct trace_probe *tp)
409 {
410 /* Enabled event can not be unregistered */
411 if (trace_probe_is_enabled(tp))
412 return -EBUSY;
413
414 /* Will fail if probe is being used by ftrace or perf */
415 if (unregister_probe_event(tp))
416 return -EBUSY;
417
418 __unregister_trace_probe(tp);
419 list_del(&tp->list);
420
421 return 0;
422 }
423
424 /* Register a trace_probe and probe_event */
425 static int register_trace_probe(struct trace_probe *tp)
426 {
427 struct trace_probe *old_tp;
428 int ret;
429
430 mutex_lock(&probe_lock);
431
432 /* Delete old (same name) event if it exists */
433 old_tp = find_trace_probe(tp->call.name, tp->call.class->system);
434 if (old_tp) {
435 ret = unregister_trace_probe(old_tp);
436 if (ret < 0)
437 goto end;
438 free_trace_probe(old_tp);
439 }
440
441 /* Register new event */
442 ret = register_probe_event(tp);
443 if (ret) {
444 pr_warning("Failed to register probe event(%d)\n", ret);
445 goto end;
446 }
447
448 /* Register k*probe */
449 ret = __register_trace_probe(tp);
450 if (ret < 0)
451 unregister_probe_event(tp);
452 else
453 list_add_tail(&tp->list, &probe_list);
454
455 end:
456 mutex_unlock(&probe_lock);
457 return ret;
458 }
459
460 /* Module notifier callback, checking events on the module */
461 static int trace_probe_module_callback(struct notifier_block *nb,
462 unsigned long val, void *data)
463 {
464 struct module *mod = data;
465 struct trace_probe *tp;
466 int ret;
467
468 if (val != MODULE_STATE_COMING)
469 return NOTIFY_DONE;
470
471 /* Update probes on coming module */
472 mutex_lock(&probe_lock);
473 list_for_each_entry(tp, &probe_list, list) {
474 if (trace_probe_within_module(tp, mod)) {
475 /* No need to check busy - this probe should already have gone. */
476 __unregister_trace_probe(tp);
477 ret = __register_trace_probe(tp);
478 if (ret)
479 pr_warning("Failed to re-register probe %s on"
480 "%s: %d\n",
481 tp->call.name, mod->name, ret);
482 }
483 }
484 mutex_unlock(&probe_lock);
485
486 return NOTIFY_DONE;
487 }
488
489 static struct notifier_block trace_probe_module_nb = {
490 .notifier_call = trace_probe_module_callback,
491 .priority = 1 /* Invoked after kprobe module callback */
492 };
493
494 static int create_trace_probe(int argc, char **argv)
495 {
496 /*
497 * Argument syntax:
498 * - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
499 * - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
500 * Fetch args:
501 * $retval : fetch return value
502 * $stack : fetch stack address
503 * $stackN : fetch Nth of stack (N:0-)
504 * @ADDR : fetch memory at ADDR (ADDR should be in kernel)
505 * @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
506 * %REG : fetch register REG
507 * Dereferencing memory fetch:
508 * +|-offs(ARG) : fetch memory at ARG +|- offs address.
509 * Alias name of args:
510 * NAME=FETCHARG : set NAME as alias of FETCHARG.
511 * Type of args:
512 * FETCHARG:TYPE : use TYPE instead of unsigned long.
513 */
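/*
 * Illustrative examples only (not part of this file): with the syntax
 * documented above, user space would typically write lines such as
 *
 *   echo 'p:myprobe do_sys_open dfd=%ax filename=%dx' > kprobe_events
 *   echo 'r:myretprobe do_sys_open $retval' >> kprobe_events
 *
 * via the debugfs "kprobe_events" file created below. The symbol and
 * register names are examples and are architecture dependent.
 */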
514 struct trace_probe *tp;
515 int i, ret = 0;
516 bool is_return = false, is_delete = false;
517 char *symbol = NULL, *event = NULL, *group = NULL;
518 char *arg;
519 unsigned long offset = 0;
520 void *addr = NULL;
521 char buf[MAX_EVENT_NAME_LEN];
522
523 /* argc must be >= 1 */
524 if (argv[0][0] == 'p')
525 is_return = false;
526 else if (argv[0][0] == 'r')
527 is_return = true;
528 else if (argv[0][0] == '-')
529 is_delete = true;
530 else {
531 pr_info("Probe definition must be started with 'p', 'r' or"
532 " '-'.\n");
533 return -EINVAL;
534 }
535
536 if (argv[0][1] == ':') {
537 event = &argv[0][2];
538 if (strchr(event, '/')) {
539 group = event;
540 event = strchr(group, '/') + 1;
541 event[-1] = '\0';
542 if (strlen(group) == 0) {
543 pr_info("Group name is not specified\n");
544 return -EINVAL;
545 }
546 }
547 if (strlen(event) == 0) {
548 pr_info("Event name is not specified\n");
549 return -EINVAL;
550 }
551 }
552 if (!group)
553 group = KPROBE_EVENT_SYSTEM;
554
555 if (is_delete) {
556 if (!event) {
557 pr_info("Delete command needs an event name.\n");
558 return -EINVAL;
559 }
560 mutex_lock(&probe_lock);
561 tp = find_trace_probe(event, group);
562 if (!tp) {
563 mutex_unlock(&probe_lock);
564 pr_info("Event %s/%s doesn't exist.\n", group, event);
565 return -ENOENT;
566 }
567 /* delete an event */
568 ret = unregister_trace_probe(tp);
569 if (ret == 0)
570 free_trace_probe(tp);
571 mutex_unlock(&probe_lock);
572 return ret;
573 }
574
575 if (argc < 2) {
576 pr_info("Probe point is not specified.\n");
577 return -EINVAL;
578 }
579 if (isdigit(argv[1][0])) {
580 if (is_return) {
581 pr_info("Return probe point must be a symbol.\n");
582 return -EINVAL;
583 }
584 /* an address specified */
585 ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
586 if (ret) {
587 pr_info("Failed to parse address.\n");
588 return ret;
589 }
590 } else {
591 /* a symbol specified */
592 symbol = argv[1];
593 /* TODO: support .init module functions */
594 ret = traceprobe_split_symbol_offset(symbol, &offset);
595 if (ret) {
596 pr_info("Failed to parse symbol.\n");
597 return ret;
598 }
599 if (offset && is_return) {
600 pr_info("Return probe must be used without offset.\n");
601 return -EINVAL;
602 }
603 }
604 argc -= 2; argv += 2;
605
606 /* setup a probe */
607 if (!event) {
608 /* Make a new event name */
609 if (symbol)
610 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
611 is_return ? 'r' : 'p', symbol, offset);
612 else
613 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
614 is_return ? 'r' : 'p', addr);
615 event = buf;
616 }
617 tp = alloc_trace_probe(group, event, addr, symbol, offset, argc,
618 is_return);
619 if (IS_ERR(tp)) {
620 pr_info("Failed to allocate trace_probe.(%d)\n",
621 (int)PTR_ERR(tp));
622 return PTR_ERR(tp);
623 }
624
625 /* parse arguments */
626 ret = 0;
627 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
628 /* Increment count for freeing args in error case */
629 tp->nr_args++;
630
631 /* Parse argument name */
632 arg = strchr(argv[i], '=');
633 if (arg) {
634 *arg++ = '\0';
635 tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
636 } else {
637 arg = argv[i];
638 /* If argument name is omitted, set "argN" */
639 snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
640 tp->args[i].name = kstrdup(buf, GFP_KERNEL);
641 }
642
643 if (!tp->args[i].name) {
644 pr_info("Failed to allocate argument[%d] name.\n", i);
645 ret = -ENOMEM;
646 goto error;
647 }
648
649 if (!is_good_name(tp->args[i].name)) {
650 pr_info("Invalid argument[%d] name: %s\n",
651 i, tp->args[i].name);
652 ret = -EINVAL;
653 goto error;
654 }
655
656 if (traceprobe_conflict_field_name(tp->args[i].name,
657 tp->args, i)) {
658 pr_info("Argument[%d] name '%s' conflicts with "
659 "another field.\n", i, argv[i]);
660 ret = -EINVAL;
661 goto error;
662 }
663
664 /* Parse fetch argument */
665 ret = traceprobe_parse_probe_arg(arg, &tp->size, &tp->args[i],
666 is_return, true);
667 if (ret) {
668 pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
669 goto error;
670 }
671 }
672
673 ret = register_trace_probe(tp);
674 if (ret)
675 goto error;
676 return 0;
677
678 error:
679 free_trace_probe(tp);
680 return ret;
681 }
682
683 static int release_all_trace_probes(void)
684 {
685 struct trace_probe *tp;
686 int ret = 0;
687
688 mutex_lock(&probe_lock);
689 /* Ensure no probe is in use. */
690 list_for_each_entry(tp, &probe_list, list)
691 if (trace_probe_is_enabled(tp)) {
692 ret = -EBUSY;
693 goto end;
694 }
695 /* TODO: Use batch unregistration */
696 while (!list_empty(&probe_list)) {
697 tp = list_entry(probe_list.next, struct trace_probe, list);
698 ret = unregister_trace_probe(tp);
699 if (ret)
700 goto end;
701 free_trace_probe(tp);
702 }
703
704 end:
705 mutex_unlock(&probe_lock);
706
707 return ret;
708 }
709
710 /* Probes listing interfaces */
711 static void *probes_seq_start(struct seq_file *m, loff_t *pos)
712 {
713 mutex_lock(&probe_lock);
714 return seq_list_start(&probe_list, *pos);
715 }
716
717 static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
718 {
719 return seq_list_next(v, &probe_list, pos);
720 }
721
722 static void probes_seq_stop(struct seq_file *m, void *v)
723 {
724 mutex_unlock(&probe_lock);
725 }
726
727 static int probes_seq_show(struct seq_file *m, void *v)
728 {
729 struct trace_probe *tp = v;
730 int i;
731
732 seq_printf(m, "%c", trace_probe_is_return(tp) ? 'r' : 'p');
733 seq_printf(m, ":%s/%s", tp->call.class->system, tp->call.name);
734
735 if (!tp->symbol)
736 seq_printf(m, " 0x%p", tp->rp.kp.addr);
737 else if (tp->rp.kp.offset)
738 seq_printf(m, " %s+%u", trace_probe_symbol(tp),
739 tp->rp.kp.offset);
740 else
741 seq_printf(m, " %s", trace_probe_symbol(tp));
742
743 for (i = 0; i < tp->nr_args; i++)
744 seq_printf(m, " %s=%s", tp->args[i].name, tp->args[i].comm);
745 seq_printf(m, "\n");
746
747 return 0;
748 }
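/*
 * For reference, a registered probe is listed by this function roughly as
 *
 *   p:kprobes/myprobe do_sys_open dfd=%ax filename=%dx
 *   r:kprobes/myretprobe do_sys_open arg1=$retval
 *
 * i.e. probe type, group/event, probe point, then "name=fetcharg" pairs;
 * the concrete names above are only an example.
 */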
749
750 static const struct seq_operations probes_seq_op = {
751 .start = probes_seq_start,
752 .next = probes_seq_next,
753 .stop = probes_seq_stop,
754 .show = probes_seq_show
755 };
756
757 static int probes_open(struct inode *inode, struct file *file)
758 {
759 int ret;
760
761 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
762 ret = release_all_trace_probes();
763 if (ret < 0)
764 return ret;
765 }
766
767 return seq_open(file, &probes_seq_op);
768 }
769
770 static ssize_t probes_write(struct file *file, const char __user *buffer,
771 size_t count, loff_t *ppos)
772 {
773 return traceprobe_probes_write(file, buffer, count, ppos,
774 create_trace_probe);
775 }
776
777 static const struct file_operations kprobe_events_ops = {
778 .owner = THIS_MODULE,
779 .open = probes_open,
780 .read = seq_read,
781 .llseek = seq_lseek,
782 .release = seq_release,
783 .write = probes_write,
784 };
785
786 /* Probes profiling interfaces */
787 static int probes_profile_seq_show(struct seq_file *m, void *v)
788 {
789 struct trace_probe *tp = v;
790
791 seq_printf(m, " %-44s %15lu %15lu\n", tp->call.name, tp->nhit,
792 tp->rp.kp.nmissed);
793
794 return 0;
795 }
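/*
 * Example of a resulting "kprobe_profile" line (event name, hit count,
 * missed count), purely illustrative:
 *
 *   myprobe                                            12               0
 */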
796
797 static const struct seq_operations profile_seq_op = {
798 .start = probes_seq_start,
799 .next = probes_seq_next,
800 .stop = probes_seq_stop,
801 .show = probes_profile_seq_show
802 };
803
804 static int profile_open(struct inode *inode, struct file *file)
805 {
806 return seq_open(file, &profile_seq_op);
807 }
808
809 static const struct file_operations kprobe_profile_ops = {
810 .owner = THIS_MODULE,
811 .open = profile_open,
812 .read = seq_read,
813 .llseek = seq_lseek,
814 .release = seq_release,
815 };
816
817 /* Sum up total data length for dynamic arrays (strings) */
818 static __kprobes int __get_data_size(struct trace_probe *tp,
819 struct pt_regs *regs)
820 {
821 int i, ret = 0;
822 u32 len;
823
824 for (i = 0; i < tp->nr_args; i++)
825 if (unlikely(tp->args[i].fetch_size.fn)) {
826 call_fetch(&tp->args[i].fetch_size, regs, &len);
827 ret += len;
828 }
829
830 return ret;
831 }
832
833 /* Store the value of each argument */
834 static __kprobes void store_trace_args(int ent_size, struct trace_probe *tp,
835 struct pt_regs *regs,
836 u8 *data, int maxlen)
837 {
838 int i;
839 u32 end = tp->size;
840 u32 *dl; /* Data (relative) location */
841
842 for (i = 0; i < tp->nr_args; i++) {
843 if (unlikely(tp->args[i].fetch_size.fn)) {
844 /*
845 * First, we set the relative location and
846 * maximum data length to *dl
847 */
848 dl = (u32 *)(data + tp->args[i].offset);
849 *dl = make_data_rloc(maxlen, end - tp->args[i].offset);
850 /* Then try to fetch string or dynamic array data */
851 call_fetch(&tp->args[i].fetch, regs, dl);
852 /* Reduce maximum length */
853 end += get_rloc_len(*dl);
854 maxlen -= get_rloc_len(*dl);
855 /* Trick here, convert data_rloc to data_loc */
856 *dl = convert_rloc_to_loc(*dl,
857 ent_size + tp->args[i].offset);
858 } else
859 /* Just fetching data normally */
860 call_fetch(&tp->args[i].fetch, regs,
861 data + tp->args[i].offset);
862 }
863 }
864
865 /* Kprobe handler */
866 static __kprobes void
867 __kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs,
868 struct ftrace_event_file *ftrace_file)
869 {
870 struct kprobe_trace_entry_head *entry;
871 struct ring_buffer_event *event;
872 struct ring_buffer *buffer;
873 int size, dsize, pc;
874 unsigned long irq_flags;
875 struct ftrace_event_call *call = &tp->call;
876
877 WARN_ON(call != ftrace_file->event_call);
878
879 if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
880 return;
881
882 local_save_flags(irq_flags);
883 pc = preempt_count();
884
885 dsize = __get_data_size(tp, regs);
886 size = sizeof(*entry) + tp->size + dsize;
887
888 event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
889 call->event.type,
890 size, irq_flags, pc);
891 if (!event)
892 return;
893
894 entry = ring_buffer_event_data(event);
895 entry->ip = (unsigned long)tp->rp.kp.addr;
896 store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
897
898 if (!filter_current_check_discard(buffer, call, entry, event))
899 trace_buffer_unlock_commit_regs(buffer, event,
900 irq_flags, pc, regs);
901 }
902
903 static __kprobes void
904 kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs)
905 {
906 /*
907 * Note: preempt is already disabled around the kprobe handler.
908 * However, we still need an smp_read_barrier_depends() corresponding
909 * to smp_wmb() in rcu_assign_pointer() to access the pointer.
910 */
911 struct ftrace_event_file **file = rcu_dereference_raw(tp->files);
912
913 if (unlikely(!file))
914 return;
915
916 while (*file) {
917 __kprobe_trace_func(tp, regs, *file);
918 file++;
919 }
920 }
921
922 /* Kretprobe handler */
923 static __kprobes void
924 __kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
925 struct pt_regs *regs,
926 struct ftrace_event_file *ftrace_file)
927 {
928 struct kretprobe_trace_entry_head *entry;
929 struct ring_buffer_event *event;
930 struct ring_buffer *buffer;
931 int size, pc, dsize;
932 unsigned long irq_flags;
933 struct ftrace_event_call *call = &tp->call;
934
935 WARN_ON(call != ftrace_file->event_call);
936
937 if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
938 return;
939
940 local_save_flags(irq_flags);
941 pc = preempt_count();
942
943 dsize = __get_data_size(tp, regs);
944 size = sizeof(*entry) + tp->size + dsize;
945
946 event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
947 call->event.type,
948 size, irq_flags, pc);
949 if (!event)
950 return;
951
952 entry = ring_buffer_event_data(event);
953 entry->func = (unsigned long)tp->rp.kp.addr;
954 entry->ret_ip = (unsigned long)ri->ret_addr;
955 store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
956
957 if (!filter_current_check_discard(buffer, call, entry, event))
958 trace_buffer_unlock_commit_regs(buffer, event,
959 irq_flags, pc, regs);
960 }
961
962 static __kprobes void
963 kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
964 struct pt_regs *regs)
965 {
966 /*
967 * Note: preempt is already disabled around the kprobe handler.
968 * However, we still need an smp_read_barrier_depends() corresponding
969 * to smp_wmb() in rcu_assign_pointer() to access the pointer.
970 */
971 struct ftrace_event_file **file = rcu_dereference_raw(tp->files);
972
973 if (unlikely(!file))
974 return;
975
976 while (*file) {
977 __kretprobe_trace_func(tp, ri, regs, *file);
978 file++;
979 }
980 }
981
982 /* Event entry printers */
983 static enum print_line_t
984 print_kprobe_event(struct trace_iterator *iter, int flags,
985 struct trace_event *event)
986 {
987 struct kprobe_trace_entry_head *field;
988 struct trace_seq *s = &iter->seq;
989 struct trace_probe *tp;
990 u8 *data;
991 int i;
992
993 field = (struct kprobe_trace_entry_head *)iter->ent;
994 tp = container_of(event, struct trace_probe, call.event);
995
996 if (!trace_seq_printf(s, "%s: (", tp->call.name))
997 goto partial;
998
999 if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
1000 goto partial;
1001
1002 if (!trace_seq_puts(s, ")"))
1003 goto partial;
1004
1005 data = (u8 *)&field[1];
1006 for (i = 0; i < tp->nr_args; i++)
1007 if (!tp->args[i].type->print(s, tp->args[i].name,
1008 data + tp->args[i].offset, field))
1009 goto partial;
1010
1011 if (!trace_seq_puts(s, "\n"))
1012 goto partial;
1013
1014 return TRACE_TYPE_HANDLED;
1015 partial:
1016 return TRACE_TYPE_PARTIAL_LINE;
1017 }
1018
1019 static enum print_line_t
1020 print_kretprobe_event(struct trace_iterator *iter, int flags,
1021 struct trace_event *event)
1022 {
1023 struct kretprobe_trace_entry_head *field;
1024 struct trace_seq *s = &iter->seq;
1025 struct trace_probe *tp;
1026 u8 *data;
1027 int i;
1028
1029 field = (struct kretprobe_trace_entry_head *)iter->ent;
1030 tp = container_of(event, struct trace_probe, call.event);
1031
1032 if (!trace_seq_printf(s, "%s: (", tp->call.name))
1033 goto partial;
1034
1035 if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
1036 goto partial;
1037
1038 if (!trace_seq_puts(s, " <- "))
1039 goto partial;
1040
1041 if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
1042 goto partial;
1043
1044 if (!trace_seq_puts(s, ")"))
1045 goto partial;
1046
1047 data = (u8 *)&field[1];
1048 for (i = 0; i < tp->nr_args; i++)
1049 if (!tp->args[i].type->print(s, tp->args[i].name,
1050 data + tp->args[i].offset, field))
1051 goto partial;
1052
1053 if (!trace_seq_puts(s, "\n"))
1054 goto partial;
1055
1056 return TRACE_TYPE_HANDLED;
1057 partial:
1058 return TRACE_TYPE_PARTIAL_LINE;
1059 }
1060
1061
1062 static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
1063 {
1064 int ret, i;
1065 struct kprobe_trace_entry_head field;
1066 struct trace_probe *tp = (struct trace_probe *)event_call->data;
1067
1068 DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
1069 /* Set argument names as fields */
1070 for (i = 0; i < tp->nr_args; i++) {
1071 ret = trace_define_field(event_call, tp->args[i].type->fmttype,
1072 tp->args[i].name,
1073 sizeof(field) + tp->args[i].offset,
1074 tp->args[i].type->size,
1075 tp->args[i].type->is_signed,
1076 FILTER_OTHER);
1077 if (ret)
1078 return ret;
1079 }
1080 return 0;
1081 }
1082
1083 static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
1084 {
1085 int ret, i;
1086 struct kretprobe_trace_entry_head field;
1087 struct trace_probe *tp = (struct trace_probe *)event_call->data;
1088
1089 DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1090 DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
1091 /* Set argument names as fields */
1092 for (i = 0; i < tp->nr_args; i++) {
1093 ret = trace_define_field(event_call, tp->args[i].type->fmttype,
1094 tp->args[i].name,
1095 sizeof(field) + tp->args[i].offset,
1096 tp->args[i].type->size,
1097 tp->args[i].type->is_signed,
1098 FILTER_OTHER);
1099 if (ret)
1100 return ret;
1101 }
1102 return 0;
1103 }
1104
1105 static int __set_print_fmt(struct trace_probe *tp, char *buf, int len)
1106 {
1107 int i;
1108 int pos = 0;
1109
1110 const char *fmt, *arg;
1111
1112 if (!trace_probe_is_return(tp)) {
1113 fmt = "(%lx)";
1114 arg = "REC->" FIELD_STRING_IP;
1115 } else {
1116 fmt = "(%lx <- %lx)";
1117 arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP;
1118 }
1119
1120 /* When len=0, we just calculate the needed length */
1121 #define LEN_OR_ZERO (len ? len - pos : 0)
1122
1123 pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt);
1124
1125 for (i = 0; i < tp->nr_args; i++) {
1126 pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%s",
1127 tp->args[i].name, tp->args[i].type->fmt);
1128 }
1129
1130 pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg);
1131
1132 for (i = 0; i < tp->nr_args; i++) {
1133 if (strcmp(tp->args[i].type->name, "string") == 0)
1134 pos += snprintf(buf + pos, LEN_OR_ZERO,
1135 ", __get_str(%s)",
1136 tp->args[i].name);
1137 else
1138 pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s",
1139 tp->args[i].name);
1140 }
1141
1142 #undef LEN_OR_ZERO
1143
1144 /* return the length of print_fmt */
1145 return pos;
1146 }
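/*
 * For example (illustrative only; the exact per-type formats come from the
 * fetch type table in trace_probe.c), a kprobe event with one argument
 * "dfd" would end up with a print_fmt along the lines of:
 *
 *   "(%lx) dfd=0x%Lx", REC->__probe_ip, REC->dfd
 *
 * and a kretprobe event would start with "(%lx <- %lx)" plus the
 * corresponding REC->__probe_func / REC->__probe_ret_ip arguments.
 */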
1147
1148 static int set_print_fmt(struct trace_probe *tp)
1149 {
1150 int len;
1151 char *print_fmt;
1152
1153 /* First: called with 0 length to calculate the needed length */
1154 len = __set_print_fmt(tp, NULL, 0);
1155 print_fmt = kmalloc(len + 1, GFP_KERNEL);
1156 if (!print_fmt)
1157 return -ENOMEM;
1158
1159 /* Second: actually write the @print_fmt */
1160 __set_print_fmt(tp, print_fmt, len + 1);
1161 tp->call.print_fmt = print_fmt;
1162
1163 return 0;
1164 }
1165
1166 #ifdef CONFIG_PERF_EVENTS
1167
1168 /* Kprobe profile handler */
1169 static __kprobes void
1170 kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs)
1171 {
1172 struct ftrace_event_call *call = &tp->call;
1173 struct kprobe_trace_entry_head *entry;
1174 struct hlist_head *head;
1175 int size, __size, dsize;
1176 int rctx;
1177
1178 dsize = __get_data_size(tp, regs);
1179 __size = sizeof(*entry) + tp->size + dsize;
1180 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1181 size -= sizeof(u32);
1182 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
1183 "profile buffer not large enough"))
1184 return;
1185
1186 entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
1187 if (!entry)
1188 return;
1189
1190 entry->ip = (unsigned long)tp->rp.kp.addr;
1191 memset(&entry[1], 0, dsize);
1192 store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
1193
1194 head = this_cpu_ptr(call->perf_events);
1195 perf_trace_buf_submit(entry, size, rctx,
1196 entry->ip, 1, regs, head, NULL);
1197 }
1198
1199 /* Kretprobe profile handler */
1200 static __kprobes void
1201 kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri,
1202 struct pt_regs *regs)
1203 {
1204 struct ftrace_event_call *call = &tp->call;
1205 struct kretprobe_trace_entry_head *entry;
1206 struct hlist_head *head;
1207 int size, __size, dsize;
1208 int rctx;
1209
1210 dsize = __get_data_size(tp, regs);
1211 __size = sizeof(*entry) + tp->size + dsize;
1212 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1213 size -= sizeof(u32);
1214 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
1215 "profile buffer not large enough"))
1216 return;
1217
1218 entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
1219 if (!entry)
1220 return;
1221
1222 entry->func = (unsigned long)tp->rp.kp.addr;
1223 entry->ret_ip = (unsigned long)ri->ret_addr;
1224 store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
1225
1226 head = this_cpu_ptr(call->perf_events);
1227 perf_trace_buf_submit(entry, size, rctx,
1228 entry->ret_ip, 1, regs, head, NULL);
1229 }
1230 #endif /* CONFIG_PERF_EVENTS */
1231
1232 static __kprobes
1233 int kprobe_register(struct ftrace_event_call *event,
1234 enum trace_reg type, void *data)
1235 {
1236 struct trace_probe *tp = (struct trace_probe *)event->data;
1237 struct ftrace_event_file *file = data;
1238
1239 switch (type) {
1240 case TRACE_REG_REGISTER:
1241 return enable_trace_probe(tp, file);
1242 case TRACE_REG_UNREGISTER:
1243 return disable_trace_probe(tp, file);
1244
1245 #ifdef CONFIG_PERF_EVENTS
1246 case TRACE_REG_PERF_REGISTER:
1247 return enable_trace_probe(tp, NULL);
1248 case TRACE_REG_PERF_UNREGISTER:
1249 return disable_trace_probe(tp, NULL);
1250 case TRACE_REG_PERF_OPEN:
1251 case TRACE_REG_PERF_CLOSE:
1252 case TRACE_REG_PERF_ADD:
1253 case TRACE_REG_PERF_DEL:
1254 return 0;
1255 #endif
1256 }
1257 return 0;
1258 }
1259
1260 static __kprobes
1261 int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1262 {
1263 struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
1264
1265 tp->nhit++;
1266
1267 if (tp->flags & TP_FLAG_TRACE)
1268 kprobe_trace_func(tp, regs);
1269 #ifdef CONFIG_PERF_EVENTS
1270 if (tp->flags & TP_FLAG_PROFILE)
1271 kprobe_perf_func(tp, regs);
1272 #endif
1273 return 0; /* We don't tweak the kernel, so just return 0 */
1274 }
1275
1276 static __kprobes
1277 int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1278 {
1279 struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
1280
1281 tp->nhit++;
1282
1283 if (tp->flags & TP_FLAG_TRACE)
1284 kretprobe_trace_func(tp, ri, regs);
1285 #ifdef CONFIG_PERF_EVENTS
1286 if (tp->flags & TP_FLAG_PROFILE)
1287 kretprobe_perf_func(tp, ri, regs);
1288 #endif
1289 return 0; /* We don't tweak the kernel, so just return 0 */
1290 }
1291
1292 static struct trace_event_functions kretprobe_funcs = {
1293 .trace = print_kretprobe_event
1294 };
1295
1296 static struct trace_event_functions kprobe_funcs = {
1297 .trace = print_kprobe_event
1298 };
1299
1300 static int register_probe_event(struct trace_probe *tp)
1301 {
1302 struct ftrace_event_call *call = &tp->call;
1303 int ret;
1304
1305 /* Initialize ftrace_event_call */
1306 INIT_LIST_HEAD(&call->class->fields);
1307 if (trace_probe_is_return(tp)) {
1308 call->event.funcs = &kretprobe_funcs;
1309 call->class->define_fields = kretprobe_event_define_fields;
1310 } else {
1311 call->event.funcs = &kprobe_funcs;
1312 call->class->define_fields = kprobe_event_define_fields;
1313 }
1314 if (set_print_fmt(tp) < 0)
1315 return -ENOMEM;
1316 ret = register_ftrace_event(&call->event);
1317 if (!ret) {
1318 kfree(call->print_fmt);
1319 return -ENODEV;
1320 }
1321 call->flags = 0;
1322 call->class->reg = kprobe_register;
1323 call->data = tp;
1324 ret = trace_add_event_call(call);
1325 if (ret) {
1326 pr_info("Failed to register kprobe event: %s\n", call->name);
1327 kfree(call->print_fmt);
1328 unregister_ftrace_event(&call->event);
1329 }
1330 return ret;
1331 }
1332
1333 static int unregister_probe_event(struct trace_probe *tp)
1334 {
1335 int ret;
1336
1337 /* tp->event is unregistered in trace_remove_event_call() */
1338 ret = trace_remove_event_call(&tp->call);
1339 if (!ret)
1340 kfree(tp->call.print_fmt);
1341 return ret;
1342 }
1343
1344 /* Make a debugfs interface for controlling probe points */
1345 static __init int init_kprobe_trace(void)
1346 {
1347 struct dentry *d_tracer;
1348 struct dentry *entry;
1349
1350 if (register_module_notifier(&trace_probe_module_nb))
1351 return -EINVAL;
1352
1353 d_tracer = tracing_init_dentry();
1354 if (!d_tracer)
1355 return 0;
1356
1357 entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
1358 NULL, &kprobe_events_ops);
1359
1360 /* Event list interface */
1361 if (!entry)
1362 pr_warning("Could not create debugfs "
1363 "'kprobe_events' entry\n");
1364
1365 /* Profile interface */
1366 entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
1367 NULL, &kprobe_profile_ops);
1368
1369 if (!entry)
1370 pr_warning("Could not create debugfs "
1371 "'kprobe_profile' entry\n");
1372 return 0;
1373 }
1374 fs_initcall(init_kprobe_trace);
1375
1376
1377 #ifdef CONFIG_FTRACE_STARTUP_TEST
1378
1379 /*
1380 * The "__used" keeps gcc from removing the function symbol
1381 * from the kallsyms table.
1382 */
1383 static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
1384 int a4, int a5, int a6)
1385 {
1386 return a1 + a2 + a3 + a4 + a5 + a6;
1387 }
1388
1389 static struct ftrace_event_file *
1390 find_trace_probe_file(struct trace_probe *tp, struct trace_array *tr)
1391 {
1392 struct ftrace_event_file *file;
1393
1394 list_for_each_entry(file, &tr->events, list)
1395 if (file->event_call == &tp->call)
1396 return file;
1397
1398 return NULL;
1399 }
1400
1401 static __init int kprobe_trace_self_tests_init(void)
1402 {
1403 int ret, warn = 0;
1404 int (*target)(int, int, int, int, int, int);
1405 struct trace_probe *tp;
1406 struct ftrace_event_file *file;
1407
1408 target = kprobe_trace_selftest_target;
1409
1410 pr_info("Testing kprobe tracing: ");
1411
1412 ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
1413 "$stack $stack0 +0($stack)",
1414 create_trace_probe);
1415 if (WARN_ON_ONCE(ret)) {
1416 pr_warn("error on probing function entry.\n");
1417 warn++;
1418 } else {
1419 /* Enable trace point */
1420 tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
1421 if (WARN_ON_ONCE(tp == NULL)) {
1422 pr_warn("error on getting new probe.\n");
1423 warn++;
1424 } else {
1425 file = find_trace_probe_file(tp, top_trace_array());
1426 if (WARN_ON_ONCE(file == NULL)) {
1427 pr_warn("error on getting probe file.\n");
1428 warn++;
1429 } else
1430 enable_trace_probe(tp, file);
1431 }
1432 }
1433
1434 ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
1435 "$retval", create_trace_probe);
1436 if (WARN_ON_ONCE(ret)) {
1437 pr_warn("error on probing function return.\n");
1438 warn++;
1439 } else {
1440 /* Enable trace point */
1441 tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
1442 if (WARN_ON_ONCE(tp == NULL)) {
1443 pr_warn("error on getting 2nd new probe.\n");
1444 warn++;
1445 } else {
1446 file = find_trace_probe_file(tp, top_trace_array());
1447 if (WARN_ON_ONCE(file == NULL)) {
1448 pr_warn("error on getting probe file.\n");
1449 warn++;
1450 } else
1451 enable_trace_probe(tp, file);
1452 }
1453 }
1454
1455 if (warn)
1456 goto end;
1457
1458 ret = target(1, 2, 3, 4, 5, 6);
1459
1460 /* Disable trace points before removing it */
1461 tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
1462 if (WARN_ON_ONCE(tp == NULL)) {
1463 pr_warn("error on getting test probe.\n");
1464 warn++;
1465 } else {
1466 file = find_trace_probe_file(tp, top_trace_array());
1467 if (WARN_ON_ONCE(file == NULL)) {
1468 pr_warn("error on getting probe file.\n");
1469 warn++;
1470 } else
1471 disable_trace_probe(tp, file);
1472 }
1473
1474 tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
1475 if (WARN_ON_ONCE(tp == NULL)) {
1476 pr_warn("error on getting 2nd test probe.\n");
1477 warn++;
1478 } else {
1479 file = find_trace_probe_file(tp, top_trace_array());
1480 if (WARN_ON_ONCE(file == NULL)) {
1481 pr_warn("error on getting probe file.\n");
1482 warn++;
1483 } else
1484 disable_trace_probe(tp, file);
1485 }
1486
1487 ret = traceprobe_command("-:testprobe", create_trace_probe);
1488 if (WARN_ON_ONCE(ret)) {
1489 pr_warn("error on deleting a probe.\n");
1490 warn++;
1491 }
1492
1493 ret = traceprobe_command("-:testprobe2", create_trace_probe);
1494 if (WARN_ON_ONCE(ret)) {
1495 pr_warn("error on deleting a probe.\n");
1496 warn++;
1497 }
1498
1499 end:
1500 release_all_trace_probes();
1501 if (warn)
1502 pr_cont("NG: Some tests are failed. Please check them.\n");
1503 else
1504 pr_cont("OK\n");
1505 return 0;
1506 }
1507
1508 late_initcall(kprobe_trace_self_tests_init);
1509
1510 #endif