tracing/ftrace: don't consume unhandled entries by boot tracer
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / kernel / trace / trace_selftest.c
CommitLineData
60a11774
SR
1/* Include in trace.c */
2
3#include <linux/kthread.h>
c7aafc54 4#include <linux/delay.h>
60a11774 5
e309b41d 6static inline int trace_valid_entry(struct trace_entry *entry)
60a11774
SR
7{
8 switch (entry->type) {
9 case TRACE_FN:
10 case TRACE_CTX:
57422797 11 case TRACE_WAKE:
dd0e545f 12 case TRACE_CONT:
06fa75ab 13 case TRACE_STACK:
dd0e545f 14 case TRACE_PRINT:
06fa75ab 15 case TRACE_SPECIAL:
60a11774
SR
16 return 1;
17 }
18 return 0;
19}
20
/*
 * Walk one CPU's trace buffer and verify its structural sanity:
 * every live entry has a known type, the page list is well formed,
 * and walking tr->entries slots lands back on the last page.
 *
 * Returns 0 when the buffer is consistent.  On any corruption it
 * disables tracing globally (tracing_disabled) and returns -1.
 */
static int
trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
{
	struct trace_entry *entries;
	struct page *page;
	int idx = 0;	/* slot index within the current page */
	int i;

	/* A per-cpu buffer always owns at least one page. */
	BUG_ON(list_empty(&data->trace_pages));
	page = list_entry(data->trace_pages.next, struct page, lru);
	entries = page_address(page);

	check_pages(data);
	/* The head page must be the first page on the list. */
	if (head_page(data) != entries)
		goto failed;

	/*
	 * The starting trace buffer always has valid elements,
	 * if any element exists.
	 */
	entries = head_page(data);

	for (i = 0; i < tr->entries; i++) {

		/* Only the first trace_idx slots hold live entries. */
		if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
			printk(KERN_CONT ".. invalid entry %d ",
				entries[idx].type);
			goto failed;
		}

		idx++;
		if (idx >= ENTRIES_PER_PAGE) {
			/* Page exhausted: step to the next page in the list. */
			page = virt_to_page(entries);
			if (page->lru.next == &data->trace_pages) {
				/*
				 * Hit the end of the page list before the
				 * last expected slot: size mismatch.
				 */
				if (i != tr->entries - 1) {
					printk(KERN_CONT ".. entries buffer mismatch");
					goto failed;
				}
			} else {
				page = list_entry(page->lru.next, struct page, lru);
				entries = page_address(page);
			}
			idx = 0;
		}
	}

	/* After the full walk we must be sitting on the final page. */
	page = virt_to_page(entries);
	if (page->lru.next != &data->trace_pages) {
		printk(KERN_CONT ".. too many entries");
		goto failed;
	}

	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}
81
/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 *
 * Runs trace_test_buffer_cpu() on every possible CPU's buffer and,
 * when @count is non-NULL, returns the total number of recorded
 * entries through it.  ftrace_max_lock is taken with irqs off so the
 * max-latency buffers cannot be swapped out from under the check.
 *
 * Returns 0 on success, or the first per-cpu failure code.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	raw_local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);
	for_each_possible_cpu(cpu) {
		/* CPUs that never recorded anything have no head page. */
		if (!head_page(tr->data[cpu]))
			continue;

		cnt += tr->data[cpu]->trace_idx;

		ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
		if (ret)
			break;
	}
	__raw_spin_unlock(&ftrace_max_lock);
	raw_local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}
112
113#ifdef CONFIG_FTRACE
77a2b37d
SR
114
115#ifdef CONFIG_DYNAMIC_FTRACE
116
77a2b37d
SR
117#define __STR(x) #x
118#define STR(x) __STR(x)
77a2b37d
SR
119
/*
 * Test dynamic code modification and ftrace filters.
 *
 * Sets a filter on DYN_FTRACE_TEST_NAME, verifies that nothing else is
 * traced while the function is not called, then calls it once and
 * verifies exactly one entry was recorded.  @func is the test function
 * passed in by the caller (indirection keeps gcc from optimizing the
 * call away).  Global ftrace/tracer enable state is saved and restored.
 */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to fool gcc from optimizing */
	func();

	/* update the records */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" STR(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}
 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
207#else
208# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
209#endif /* CONFIG_DYNAMIC_FTRACE */
60a11774
SR
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 *
 * On success this chains into the dynamic-ftrace selftest (a no-op
 * when CONFIG_DYNAMIC_FTRACE is off).  On any failure ftrace is
 * killed outright so a broken tracer cannot do further damage.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* force the recorded functions to be traced */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* disable function tracing before reading the buffer back */
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
269#endif /* CONFIG_FTRACE */
270
271#ifdef CONFIG_IRQSOFF_TRACER
272int
273trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
274{
275 unsigned long save_max = tracing_max_latency;
276 unsigned long count;
277 int ret;
278
279 /* start the tracing */
280 tr->ctrl = 1;
281 trace->init(tr);
282 /* reset the max latency */
283 tracing_max_latency = 0;
284 /* disable interrupts for a bit */
285 local_irq_disable();
286 udelay(100);
287 local_irq_enable();
288 /* stop the tracing. */
289 tr->ctrl = 0;
290 trace->ctrl_update(tr);
291 /* check both trace buffers */
292 ret = trace_test_buffer(tr, NULL);
293 if (!ret)
294 ret = trace_test_buffer(&max_tr, &count);
295 trace->reset(tr);
296
297 if (!ret && !count) {
298 printk(KERN_CONT ".. no entries found ..");
299 ret = -1;
300 }
301
302 tracing_max_latency = save_max;
303
304 return ret;
305}
306#endif /* CONFIG_IRQSOFF_TRACER */
307
308#ifdef CONFIG_PREEMPT_TRACER
309int
310trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
311{
312 unsigned long save_max = tracing_max_latency;
313 unsigned long count;
314 int ret;
315
316 /* start the tracing */
317 tr->ctrl = 1;
318 trace->init(tr);
319 /* reset the max latency */
320 tracing_max_latency = 0;
321 /* disable preemption for a bit */
322 preempt_disable();
323 udelay(100);
324 preempt_enable();
325 /* stop the tracing. */
326 tr->ctrl = 0;
327 trace->ctrl_update(tr);
328 /* check both trace buffers */
329 ret = trace_test_buffer(tr, NULL);
330 if (!ret)
331 ret = trace_test_buffer(&max_tr, &count);
332 trace->reset(tr);
333
334 if (!ret && !count) {
335 printk(KERN_CONT ".. no entries found ..");
336 ret = -1;
337 }
338
339 tracing_max_latency = save_max;
340
341 return ret;
342}
343#endif /* CONFIG_PREEMPT_TRACER */
344
345#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
346int
347trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
348{
349 unsigned long save_max = tracing_max_latency;
350 unsigned long count;
351 int ret;
352
353 /* start the tracing */
354 tr->ctrl = 1;
355 trace->init(tr);
356
357 /* reset the max latency */
358 tracing_max_latency = 0;
359
360 /* disable preemption and interrupts for a bit */
361 preempt_disable();
362 local_irq_disable();
363 udelay(100);
364 preempt_enable();
365 /* reverse the order of preempt vs irqs */
366 local_irq_enable();
367
368 /* stop the tracing. */
369 tr->ctrl = 0;
370 trace->ctrl_update(tr);
371 /* check both trace buffers */
372 ret = trace_test_buffer(tr, NULL);
373 if (ret)
374 goto out;
375
376 ret = trace_test_buffer(&max_tr, &count);
377 if (ret)
378 goto out;
379
380 if (!ret && !count) {
381 printk(KERN_CONT ".. no entries found ..");
382 ret = -1;
383 goto out;
384 }
385
386 /* do the test by disabling interrupts first this time */
387 tracing_max_latency = 0;
388 tr->ctrl = 1;
389 trace->ctrl_update(tr);
390 preempt_disable();
391 local_irq_disable();
392 udelay(100);
393 preempt_enable();
394 /* reverse the order of preempt vs irqs */
395 local_irq_enable();
396
397 /* stop the tracing. */
398 tr->ctrl = 0;
399 trace->ctrl_update(tr);
400 /* check both trace buffers */
401 ret = trace_test_buffer(tr, NULL);
402 if (ret)
403 goto out;
404
405 ret = trace_test_buffer(&max_tr, &count);
406
407 if (!ret && !count) {
408 printk(KERN_CONT ".. no entries found ..");
409 ret = -1;
410 goto out;
411 }
412
413 out:
414 trace->reset(tr);
415 tracing_max_latency = save_max;
416
417 return ret;
418}
419#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
420
fb1b6d8b
SN
421#ifdef CONFIG_NOP_TRACER
/*
 * Selftest for the nop tracer: it records nothing, so there is
 * nothing to validate and the test trivially passes.
 */
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* The nop tracer has no observable state to exercise. */
	return 0;
}
428#endif
429
60a11774
SR
430#ifdef CONFIG_SCHED_TRACER
431static int trace_wakeup_test_thread(void *data)
432{
60a11774 433 /* Make this a RT thread, doesn't need to be too high */
05bd68c5
SR
434 struct sched_param param = { .sched_priority = 5 };
435 struct completion *x = data;
60a11774 436
05bd68c5 437 sched_setscheduler(current, SCHED_FIFO, &param);
60a11774
SR
438
439 /* Make it know we have a new prio */
440 complete(x);
441
442 /* now go to sleep and let the test wake us up */
443 set_current_state(TASK_INTERRUPTIBLE);
444 schedule();
445
446 /* we are awake, now wait to disappear */
447 while (!kthread_should_stop()) {
448 /*
449 * This is an RT task, do short sleeps to let
450 * others run.
451 */
452 msleep(100);
453 }
454
455 return 0;
456}
457
/*
 * Selftest for the wakeup-latency tracer: spawn an RT kthread, let it
 * go to sleep, wake it while the tracer runs, and verify the wakeup
 * latency landed in the max-latency buffer.
 */
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;	/* signalled once the thread is RT */
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that for some
	 * strange reason that the RT thread we created, did not
	 * call schedule for 100ms after doing the completion,
	 * and we do a wakeup on a task that already is awake.
	 * But that is extremely unlikely, and the worst thing that
	 * happens in such a case, is that we disable tracing.
	 * Honestly, if this race does happen something is horrible
	 * wrong with the system.
	 */

	wake_up_process(p);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	/* count is only examined when the max buffer was actually checked */
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
524#endif /* CONFIG_SCHED_TRACER */
525
526#ifdef CONFIG_CONTEXT_SWITCH_TRACER
527int
528trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
529{
530 unsigned long count;
531 int ret;
532
533 /* start the tracing */
534 tr->ctrl = 1;
535 trace->init(tr);
536 /* Sleep for a 1/10 of a second */
537 msleep(100);
538 /* stop the tracing. */
539 tr->ctrl = 0;
540 trace->ctrl_update(tr);
541 /* check the trace buffer */
542 ret = trace_test_buffer(tr, &count);
543 trace->reset(tr);
544
545 if (!ret && !count) {
546 printk(KERN_CONT ".. no entries found ..");
547 ret = -1;
548 }
549
550 return ret;
551}
552#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
a6dd24f8
IM
553
554#ifdef CONFIG_SYSPROF_TRACER
555int
556trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
557{
558 unsigned long count;
559 int ret;
560
561 /* start the tracing */
562 tr->ctrl = 1;
563 trace->init(tr);
564 /* Sleep for a 1/10 of a second */
565 msleep(100);
566 /* stop the tracing. */
567 tr->ctrl = 0;
568 trace->ctrl_update(tr);
569 /* check the trace buffer */
570 ret = trace_test_buffer(tr, &count);
571 trace->reset(tr);
572
a6dd24f8
IM
573 return ret;
574}
575#endif /* CONFIG_SYSPROF_TRACER */