/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

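/*
 * Each trace_selftest_startup_*() below runs when its tracer is
 * registered and CONFIG_FTRACE_STARTUP_TEST is enabled. Tracers opt
 * in through the ->selftest member of struct tracer (see trace.h),
 * along the lines of:
 *
 *	.selftest = trace_selftest_startup_function,
 */
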
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
	case TRACE_KSYM:
		return 1;
	}
	return 0;
}

static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

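	/*
	 * Consume every pending event on this CPU. The two NULL
	 * arguments skip the timestamp and lost-events out-parameters
	 * of ring_buffer_consume(), which this test does not need.
	 */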
	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds at most trace_buf_size entries;
		 * if we loop more times than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_max_lock);

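	/* Count the entries now: the consuming reads below empty the buffer. */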
	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

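/*
 * DYN_FTRACE_TEST_NAME expands to a real traceable function (see its
 * definition via trace.h and trace_selftest_dynamic.c); it is kept in
 * a separate object file so it is still built with the mcount
 * instrumentation that the rest of this directory is compiled without.
 */
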
/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* func is passed in as a parameter to keep gcc from optimizing the call away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function (the last argument resets any existing filter first) */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
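/* Stub: a GNU statement expression that simply evaluates to 0 ("pass"). */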
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000
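/* (far more calls than a healthy 100ms run can make, so only a hang trips it) */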

static void
__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops)
			__ftrace_dump(false, DUMP_ALL);
		return 0;
	}

	return trace_graph_entry(trace);
}
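
/* The watchdog above is registered below in place of the plain trace_graph_entry(). */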

/*
 * Pretty much the same as the function tracer selftest, from which
 * this one has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(tr);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
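
	/*
	 * The 100us irqs-off window above should now stand as the max
	 * latency, with its trace snapshotted into max_tr.
	 */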

	/*
	 * Stop the tracer first: tracing_stop() disables both the tr
	 * and max buffers, so a parallel max irqs-off latency racing
	 * with it could no longer flip the buffers, which would
	 * trigger a warning.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
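
	/*
	 * As in the irqsoff test, the 100us preempt-off window above
	 * should now be the recorded max latency in max_tr.
	 */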

	/*
	 * Stop the tracer first: tracing_stop() disables both the tr
	 * and max buffers, so a parallel max preempt-off latency racing
	 * with it could no longer flip the buffers, which would
	 * trigger a warning.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs on re-enable */
	local_irq_enable();

	/*
	 * Stop the tracer first: tracing_stop() disables both the tr
	 * and max buffers, so a parallel max irqs/preempt-off latency
	 * racing with it could no longer flip the buffers, which would
	 * trigger a warning.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs on re-enable */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

out:
	tracing_start();
out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif /* CONFIG_NOP_TRACER */

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Let the test know we are running at the new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes, this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */

#ifdef CONFIG_KSYM_TRACER
static int ksym_selftest_dummy;

int
trace_selftest_startup_ksym(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	ksym_selftest_dummy = 0;
	/* Register the read-write tracing request */
	ret = process_new_ksym_entry("ksym_selftest_dummy",
				     HW_BREAKPOINT_R | HW_BREAKPOINT_W,
				     (unsigned long)(&ksym_selftest_dummy));

	if (ret < 0) {
		printk(KERN_CONT "ksym_trace read-write startup test failed\n");
		goto ret_path;
	}
	/*
	 * Perform a read and a write operation on the dummy variable
	 * to trigger the tracer.
	 */
	if (ksym_selftest_dummy == 0)
		ksym_selftest_dummy++;

	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/*
	 * One read and one write operation were performed on the dummy
	 * variable, so we expect exactly two entries in the trace buffer.
	 */
	if (!ret && count != 2) {
		printk(KERN_CONT "Ksym tracer startup test failed");
		ret = -1;
	}

ret_path:
	return ret;
}
#endif /* CONFIG_KSYM_TRACER */