perf tools: Use __maybe_unused for unused variables
tools/perf/builtin-test.c
/*
 * builtin-test.c
 *
 * Builtin regression testing command: ever growing number of sanity tests
 */
#include "builtin.h"

#include "util/cache.h"
#include "util/debug.h"
#include "util/debugfs.h"
#include "util/evlist.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/symbol.h"
#include "util/thread_map.h"
#include "util/pmu.h"
#include "../../include/linux/hw_breakpoint.h"

#include <sys/mman.h>

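/*
 * Mark each vmlinux symbol as visited while it is loaded; the per-symbol
 * private area used for the flag is the int allocated via
 * symbol_conf.priv_size in cmd_test() below.
 */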
static int vmlinux_matches_kallsyms_filter(struct map *map __maybe_unused,
					   struct symbol *sym)
{
	bool *visited = symbol__priv(sym);
	*visited = true;
	return 0;
}

static int test__vmlinux_matches_kallsyms(void)
{
	int err = -1;
	struct rb_node *nd;
	struct symbol *sym;
	struct map *kallsyms_map, *vmlinux_map;
	struct machine kallsyms, vmlinux;
	enum map_type type = MAP__FUNCTION;
	long page_size = sysconf(_SC_PAGE_SIZE);
	struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };

	/*
	 * Step 1:
	 *
	 * Init the machines that will hold kernel, modules obtained from
	 * both vmlinux + .ko files and from /proc/kallsyms split by modules.
	 */
	machine__init(&kallsyms, "", HOST_KERNEL_ID);
	machine__init(&vmlinux, "", HOST_KERNEL_ID);

	/*
	 * Step 2:
	 *
	 * Create the kernel maps for kallsyms and the DSO where we will then
	 * load /proc/kallsyms. Also create the modules maps from /proc/modules
	 * and find the .ko files that match them in /lib/modules/`uname -r`/.
	 */
	if (machine__create_kernel_maps(&kallsyms) < 0) {
		pr_debug("machine__create_kernel_maps ");
		return -1;
	}

	/*
	 * Step 3:
	 *
	 * Load and split /proc/kallsyms into multiple maps, one per module.
	 */
	if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, NULL) <= 0) {
		pr_debug("dso__load_kallsyms ");
		goto out;
	}

	/*
	 * Step 4:
	 *
	 * kallsyms will be internally on demand sorted by name so that we can
	 * find the reference relocation symbol, i.e. the symbol we will use
	 * to see if the running kernel was relocated by checking if it has the
	 * same value in the vmlinux file we load.
	 */
	kallsyms_map = machine__kernel_map(&kallsyms, type);

	sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL);
	if (sym == NULL) {
		pr_debug("dso__find_symbol_by_name ");
		goto out;
	}

	ref_reloc_sym.addr = sym->start;

	/*
	 * Step 5:
	 *
	 * Now repeat step 2, this time for the vmlinux file we'll auto-locate.
	 */
	if (machine__create_kernel_maps(&vmlinux) < 0) {
		pr_debug("machine__create_kernel_maps ");
		goto out;
	}

	vmlinux_map = machine__kernel_map(&vmlinux, type);
	map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym;

	/*
	 * Step 6:
	 *
	 * Locate a vmlinux file in the vmlinux path that has a buildid that
	 * matches the one of the running kernel.
	 *
	 * While doing that look if we find the ref reloc symbol, if we find it
	 * we'll have its ref_reloc_symbol.unrelocated_addr and then
	 * maps__reloc_vmlinux will notice and set proper ->[un]map_ip routines
	 * to fixup the symbols.
	 */
	if (machine__load_vmlinux_path(&vmlinux, type,
				       vmlinux_matches_kallsyms_filter) <= 0) {
		pr_debug("machine__load_vmlinux_path ");
		goto out;
	}

	err = 0;
	/*
	 * Step 7:
	 *
	 * Now look at the symbols in the vmlinux DSO and check if we find all of them
	 * in the kallsyms dso. For the ones that are in both, check their names and
	 * end addresses too.
	 */
	for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) {
		struct symbol *pair, *first_pair;
		bool backwards = true;

		sym = rb_entry(nd, struct symbol, rb_node);

		if (sym->start == sym->end)
			continue;

		first_pair = machine__find_kernel_symbol(&kallsyms, type, sym->start, NULL, NULL);
		pair = first_pair;

		if (pair && pair->start == sym->start) {
next_pair:
			if (strcmp(sym->name, pair->name) == 0) {
				/*
				 * kallsyms doesn't have the symbol end, so we
				 * set that by using the next symbol start - 1,
				 * in some cases we get this up to a page
				 * wrong, trace_kmalloc when I was developing
				 * this code was one such example, 2106 bytes
				 * off the real size. More than that and we
				 * _really_ have a problem.
				 */
				s64 skew = sym->end - pair->end;
				if (llabs(skew) < page_size)
					continue;

				pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
					 sym->start, sym->name, sym->end, pair->end);
			} else {
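				/*
				 * Several symbols can share the same address
				 * (aliases); walk the neighbouring rbtree
				 * entries, first backwards and then forwards
				 * from the first match, looking for one with
				 * the expected name.
				 */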
				struct rb_node *nnd;
detour:
				nnd = backwards ? rb_prev(&pair->rb_node) :
						  rb_next(&pair->rb_node);
				if (nnd) {
					struct symbol *next = rb_entry(nnd, struct symbol, rb_node);

					if (next->start == sym->start) {
						pair = next;
						goto next_pair;
					}
				}

				if (backwards) {
					backwards = false;
					pair = first_pair;
					goto detour;
				}

				pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
					 sym->start, sym->name, pair->name);
			}
		} else
			pr_debug("%#" PRIx64 ": %s not on kallsyms\n", sym->start, sym->name);

		err = -1;
	}

	if (!verbose)
		goto out;

	pr_info("Maps only in vmlinux:\n");

	for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node), *pair;
		/*
		 * If it is the kernel, kallsyms is always "[kernel.kallsyms]", while
		 * the kernel will have the path for the vmlinux file being used,
		 * so use the short name, less descriptive but the same ("[kernel]" in
		 * both cases).
		 */
		pair = map_groups__find_by_name(&kallsyms.kmaps, type,
						(pos->dso->kernel ?
							pos->dso->short_name :
							pos->dso->name));
		if (pair)
			pair->priv = 1;
		else
			map__fprintf(pos, stderr);
	}

	pr_info("Maps in vmlinux with a different name in kallsyms:\n");

	for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node), *pair;

		pair = map_groups__find(&kallsyms.kmaps, type, pos->start);
		if (pair == NULL || pair->priv)
			continue;

		if (pair->start == pos->start) {
			pair->priv = 1;
			pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
				pos->start, pos->end, pos->pgoff, pos->dso->name);
			if (pos->pgoff != pair->pgoff || pos->end != pair->end)
				pr_info(": \n*%" PRIx64 "-%" PRIx64 " %" PRIx64 "",
					pair->start, pair->end, pair->pgoff);
			pr_info(" %s\n", pair->dso->name);
			pair->priv = 1;
		}
	}

	pr_info("Maps only in kallsyms:\n");

	for (nd = rb_first(&kallsyms.kmaps.maps[type]);
	     nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);

		if (!pos->priv)
			map__fprintf(pos, stderr);
	}
out:
	return err;
}

#include "util/cpumap.h"
#include "util/evsel.h"
#include <sys/types.h>

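/*
 * Look up a syscall tracepoint's numeric id by reading
 * <tracing_events_path>/syscalls/<evname>/id, e.g.
 * /sys/kernel/debug/tracing/events/syscalls/sys_enter_open/id.
 */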
static int trace_event__id(const char *evname)
{
	char *filename;
	int err = -1, fd;

	if (asprintf(&filename,
		     "%s/syscalls/%s/id",
		     tracing_events_path, evname) < 0)
		return -1;

	fd = open(filename, O_RDONLY);
	if (fd >= 0) {
		char id[16];
		if (read(fd, id, sizeof(id)) > 0)
			err = atoi(id);
		close(fd);
	}

	free(filename);
	return err;
}

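/*
 * Count our own open(2) calls with the sys_enter_open tracepoint attached
 * to just this thread, then check that the counter intercepted exactly
 * nr_open_calls events.
 */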
static int test__open_syscall_event(void)
{
	int err = -1, fd;
	struct thread_map *threads;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	unsigned int nr_open_calls = 111, i;
	int id = trace_event__id("sys_enter_open");

	if (id < 0) {
		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
		return -1;
	}

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.config = id;
	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		goto out_thread_map_delete;
	}

	if (perf_evsel__open_per_thread(evsel, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror(errno));
		goto out_evsel_delete;
	}

	for (i = 0; i < nr_open_calls; ++i) {
		fd = open("/etc/passwd", O_RDONLY);
		close(fd);
	}

	if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
		pr_debug("perf_evsel__read_on_cpu\n");
		goto out_close_fd;
	}

	if (evsel->counts->cpu[0].val != nr_open_calls) {
		pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
			 nr_open_calls, evsel->counts->cpu[0].val);
		goto out_close_fd;
	}

	err = 0;
out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__delete(threads);
	return err;
}

#include <sched.h>

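/*
 * Same idea, but per CPU: pin ourselves to each online CPU in turn, make a
 * CPU-specific number of open(2) calls there, and verify that each CPU's
 * counter matches what was done while running on it.
 */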
static int test__open_syscall_event_on_all_cpus(void)
{
	int err = -1, fd, cpu;
	struct thread_map *threads;
	struct cpu_map *cpus;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	unsigned int nr_open_calls = 111, i;
	cpu_set_t cpu_set;
	int id = trace_event__id("sys_enter_open");

	if (id < 0) {
		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
		return -1;
	}

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_thread_map_delete;
	}

	CPU_ZERO(&cpu_set);

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.config = id;
	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		goto out_thread_map_delete;
	}

	if (perf_evsel__open(evsel, cpus, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror(errno));
		goto out_evsel_delete;
	}

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int ncalls = nr_open_calls + cpu;
		/*
		 * XXX eventually lift this restriction in a way that
		 * keeps perf building on older glibc installations
		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
		 * a reasonable upper limit tho :-)
		 */
		if (cpus->map[cpu] >= CPU_SETSIZE) {
			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
			continue;
		}

		CPU_SET(cpus->map[cpu], &cpu_set);
		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
			pr_debug("sched_setaffinity() failed on CPU %d: %s ",
				 cpus->map[cpu],
				 strerror(errno));
			goto out_close_fd;
		}
		for (i = 0; i < ncalls; ++i) {
			fd = open("/etc/passwd", O_RDONLY);
			close(fd);
		}
		CPU_CLR(cpus->map[cpu], &cpu_set);
	}

	/*
	 * Here we need to explicitly preallocate the counts, as if
	 * we use the auto allocation it will allocate just for 1 cpu,
	 * as we start by cpu 0.
	 */
	if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
		goto out_close_fd;
	}

	err = 0;

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int expected;

		if (cpus->map[cpu] >= CPU_SETSIZE)
			continue;

		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			pr_debug("perf_evsel__read_on_cpu\n");
			err = -1;
			break;
		}

		expected = nr_open_calls + cpu;
		if (evsel->counts->cpu[cpu].val != expected) {
			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
				 expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
			err = -1;
		}
	}

out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__delete(threads);
	return err;
}

/*
 * This test will generate random numbers of calls to some getpid syscalls,
 * then establish an mmap for a group of events that are created to monitor
 * the syscalls.
 *
 * It will receive the events, using mmap, use its PERF_SAMPLE_ID generated
 * sample.id field to map back to its respective perf_evsel instance.
 *
 * Then it checks if the number of syscalls reported as perf events by
 * the kernel corresponds to the number of syscalls made.
 */
static int test__basic_mmap(void)
{
	int err = -1;
	union perf_event *event;
	struct thread_map *threads;
	struct cpu_map *cpus;
	struct perf_evlist *evlist;
	struct perf_event_attr attr = {
		.type = PERF_TYPE_TRACEPOINT,
		.read_format = PERF_FORMAT_ID,
		.sample_type = PERF_SAMPLE_ID,
		.watermark = 0,
	};
	cpu_set_t cpu_set;
	const char *syscall_names[] = { "getsid", "getppid", "getpgrp",
					"getpgid", };
	pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp,
				      (void *)getpgid };
#define nsyscalls ARRAY_SIZE(syscall_names)
	int ids[nsyscalls];
	unsigned int nr_events[nsyscalls],
		     expected_nr_events[nsyscalls], i, j;
	struct perf_evsel *evsels[nsyscalls], *evsel;

	for (i = 0; i < nsyscalls; ++i) {
		char name[64];

		snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
		ids[i] = trace_event__id(name);
		if (ids[i] < 0) {
			pr_debug("Is debugfs mounted on /sys/kernel/debug?\n");
			return -1;
		}
		nr_events[i] = 0;
		expected_nr_events[i] = random() % 257;
	}

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_free_threads;
	}

	CPU_ZERO(&cpu_set);
	CPU_SET(cpus->map[0], &cpu_set);
	if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
		pr_debug("sched_setaffinity() failed on CPU %d: %s ",
			 cpus->map[0], strerror(errno));
		goto out_free_cpus;
	}

	evlist = perf_evlist__new(cpus, threads);
	if (evlist == NULL) {
		pr_debug("perf_evlist__new\n");
		goto out_free_cpus;
	}

	/* anonymous union fields, can't be initialized above */
	attr.wakeup_events = 1;
	attr.sample_period = 1;

	for (i = 0; i < nsyscalls; ++i) {
		attr.config = ids[i];
		evsels[i] = perf_evsel__new(&attr, i);
		if (evsels[i] == NULL) {
			pr_debug("perf_evsel__new\n");
			goto out_free_evlist;
		}

		perf_evlist__add(evlist, evsels[i]);

		if (perf_evsel__open(evsels[i], cpus, threads) < 0) {
			pr_debug("failed to open counter: %s, "
				 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
				 strerror(errno));
			goto out_close_fd;
		}
	}

	if (perf_evlist__mmap(evlist, 128, true) < 0) {
		pr_debug("failed to mmap events: %d (%s)\n", errno,
			 strerror(errno));
		goto out_close_fd;
	}

	for (i = 0; i < nsyscalls; ++i)
		for (j = 0; j < expected_nr_events[i]; ++j) {
			int foo = syscalls[i]();
			++foo;
		}

	while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
		struct perf_sample sample;

		if (event->header.type != PERF_RECORD_SAMPLE) {
			pr_debug("unexpected %s event\n",
				 perf_event__name(event->header.type));
			goto out_munmap;
		}

		err = perf_evlist__parse_sample(evlist, event, &sample, false);
		if (err) {
			pr_err("Can't parse sample, err = %d\n", err);
			goto out_munmap;
		}

		evsel = perf_evlist__id2evsel(evlist, sample.id);
		if (evsel == NULL) {
			pr_debug("event with id %" PRIu64
				 " doesn't map to an evsel\n", sample.id);
			goto out_munmap;
		}
		nr_events[evsel->idx]++;
	}

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
			pr_debug("expected %d %s events, got %d\n",
				 expected_nr_events[evsel->idx],
				 perf_evsel__name(evsel), nr_events[evsel->idx]);
			goto out_munmap;
		}
	}

	err = 0;
out_munmap:
	perf_evlist__munmap(evlist);
out_close_fd:
	for (i = 0; i < nsyscalls; ++i)
		perf_evsel__close_fd(evsels[i], 1, threads->nr);
out_free_evlist:
	perf_evlist__delete(evlist);
out_free_cpus:
	cpu_map__delete(cpus);
out_free_threads:
	thread_map__delete(threads);
	return err;
#undef nsyscalls
}

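/*
 * Find the first CPU in pid's affinity mask and return a mask (sized via
 * CPU_ALLOC, retried with a bigger size while sched_getaffinity() fails
 * with EINVAL) with only that CPU set, so the workload can be pinned to it.
 */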
static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t **maskp,
					 size_t *sizep)
{
	cpu_set_t *mask;
	size_t size;
	int i, cpu = -1, nrcpus = 1024;
realloc:
	mask = CPU_ALLOC(nrcpus);
	size = CPU_ALLOC_SIZE(nrcpus);
	CPU_ZERO_S(size, mask);

	if (sched_getaffinity(pid, size, mask) == -1) {
		CPU_FREE(mask);
		if (errno == EINVAL && nrcpus < (1024 << 8)) {
			nrcpus = nrcpus << 2;
			goto realloc;
		}
		perror("sched_getaffinity");
		return -1;
	}

	for (i = 0; i < nrcpus; i++) {
		if (CPU_ISSET_S(i, size, mask)) {
			if (cpu == -1) {
				cpu = i;
				*maskp = mask;
				*sizep = size;
			} else
				CPU_CLR_S(i, size, mask);
		}
	}

	if (cpu == -1)
		CPU_FREE(mask);

	return cpu;
}

static int test__PERF_RECORD(void)
{
	struct perf_record_opts opts = {
		.target = {
			.uid = UINT_MAX,
			.uses_mmap = true,
		},
		.no_delay = true,
		.freq = 10,
		.mmap_pages = 256,
	};
	cpu_set_t *cpu_mask = NULL;
	size_t cpu_mask_size = 0;
	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
	struct perf_evsel *evsel;
	struct perf_sample sample;
	const char *cmd = "sleep";
	const char *argv[] = { cmd, "1", NULL, };
	char *bname;
	u64 prev_time = 0;
	bool found_cmd_mmap = false,
	     found_libc_mmap = false,
	     found_vdso_mmap = false,
	     found_ld_mmap = false;
	int err = -1, errs = 0, i, wakeups = 0;
	u32 cpu;
	int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };

	if (evlist == NULL || argv == NULL) {
		pr_debug("Not enough memory to create evlist\n");
		goto out;
	}

	/*
	 * We need at least one evsel in the evlist, use the default
	 * one: "cycles".
	 */
	err = perf_evlist__add_default(evlist);
	if (err < 0) {
		pr_debug("Not enough memory to create evsel\n");
		goto out_delete_evlist;
	}

	/*
	 * Create maps of threads and cpus to monitor. In this case
	 * we start with all threads and cpus (-1, -1) but then in
	 * perf_evlist__prepare_workload we'll fill in the only thread
	 * we're monitoring, the one forked there.
	 */
	err = perf_evlist__create_maps(evlist, &opts.target);
	if (err < 0) {
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_delete_evlist;
	}

	/*
	 * Prepare the workload in argv[] to run, it'll fork it, and then wait
	 * for perf_evlist__start_workload() to exec it. This is done this way
	 * so that we have time to open the evlist (calling sys_perf_event_open
	 * on all the fds) and then mmap them.
	 */
	err = perf_evlist__prepare_workload(evlist, &opts, argv);
	if (err < 0) {
		pr_debug("Couldn't run the workload!\n");
		goto out_delete_evlist;
	}

	/*
	 * Config the evsels, setting attr->comm on the first one, etc.
	 */
	evsel = perf_evlist__first(evlist);
	evsel->attr.sample_type |= PERF_SAMPLE_CPU;
	evsel->attr.sample_type |= PERF_SAMPLE_TID;
	evsel->attr.sample_type |= PERF_SAMPLE_TIME;
	perf_evlist__config_attrs(evlist, &opts);

	err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask,
					    &cpu_mask_size);
	if (err < 0) {
		pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	cpu = err;

	/*
	 * So that we can check perf_sample.cpu on all the samples.
	 */
	if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, cpu_mask) < 0) {
		pr_debug("sched_setaffinity: %s\n", strerror(errno));
		goto out_free_cpu_mask;
	}

	/*
	 * Call sys_perf_event_open on all the fds on all the evsels,
	 * grouping them if asked to.
	 */
	err = perf_evlist__open(evlist);
	if (err < 0) {
		pr_debug("perf_evlist__open: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	/*
	 * mmap the first fd on a given CPU and ask for events for the other
	 * fds in the same CPU to be injected in the same mmap ring buffer
	 * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
	 */
	err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
	if (err < 0) {
		pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	/*
	 * Now that all is properly set up, enable the events, they will
	 * count just on workload.pid, which will start...
	 */
	perf_evlist__enable(evlist);

	/*
	 * Now!
	 */
	perf_evlist__start_workload(evlist);

	while (1) {
		int before = total_events;

		for (i = 0; i < evlist->nr_mmaps; i++) {
			union perf_event *event;

			while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
				const u32 type = event->header.type;
				const char *name = perf_event__name(type);

				++total_events;
				if (type < PERF_RECORD_MAX)
					nr_events[type]++;

				err = perf_evlist__parse_sample(evlist, event, &sample, false);
				if (err < 0) {
					if (verbose)
						perf_event__fprintf(event, stderr);
					pr_debug("Couldn't parse sample\n");
					goto out_err;
				}

				if (verbose) {
					pr_info("%" PRIu64" %d ", sample.time, sample.cpu);
					perf_event__fprintf(event, stderr);
				}

				if (prev_time > sample.time) {
					pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n",
						 name, prev_time, sample.time);
					++errs;
				}

				prev_time = sample.time;

				if (sample.cpu != cpu) {
					pr_debug("%s with unexpected cpu, expected %d, got %d\n",
						 name, cpu, sample.cpu);
					++errs;
				}

				if ((pid_t)sample.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.pid);
					++errs;
				}

				if ((pid_t)sample.tid != evlist->workload.pid) {
					pr_debug("%s with unexpected tid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.tid);
					++errs;
				}

				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP ||
				     type == PERF_RECORD_FORK ||
				     type == PERF_RECORD_EXIT) &&
				    (pid_t)event->comm.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid/tid\n", name);
					++errs;
				}

				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP) &&
				     event->comm.pid != event->comm.tid) {
					pr_debug("%s with different pid/tid!\n", name);
					++errs;
				}

				switch (type) {
				case PERF_RECORD_COMM:
					if (strcmp(event->comm.comm, cmd)) {
						pr_debug("%s with unexpected comm!\n", name);
						++errs;
					}
					break;
				case PERF_RECORD_EXIT:
					goto found_exit;
				case PERF_RECORD_MMAP:
					bname = strrchr(event->mmap.filename, '/');
					if (bname != NULL) {
						if (!found_cmd_mmap)
							found_cmd_mmap = !strcmp(bname + 1, cmd);
						if (!found_libc_mmap)
							found_libc_mmap = !strncmp(bname + 1, "libc", 4);
						if (!found_ld_mmap)
							found_ld_mmap = !strncmp(bname + 1, "ld", 2);
					} else if (!found_vdso_mmap)
						found_vdso_mmap = !strcmp(event->mmap.filename, "[vdso]");
					break;

				case PERF_RECORD_SAMPLE:
					/* Just ignore samples for now */
					break;
				default:
					pr_debug("Unexpected perf_event->header.type %d!\n",
						 type);
					++errs;
				}
			}
		}

		/*
		 * We don't use poll here because at least as of 3.1 the
		 * PERF_RECORD_{!SAMPLE} events don't honour
		 * perf_event_attr.wakeup_events, just PERF_RECORD_SAMPLE does.
		 */
		if (total_events == before && false)
			poll(evlist->pollfd, evlist->nr_fds, -1);

		sleep(1);
		if (++wakeups > 5) {
			pr_debug("No PERF_RECORD_EXIT event!\n");
			break;
		}
	}

found_exit:
	if (nr_events[PERF_RECORD_COMM] > 1) {
		pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
		++errs;
	}

	if (nr_events[PERF_RECORD_COMM] == 0) {
		pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd);
		++errs;
	}

	if (!found_cmd_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
		++errs;
	}

	if (!found_libc_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc");
		++errs;
	}

	if (!found_ld_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld");
		++errs;
	}

	if (!found_vdso_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]");
		++errs;
	}
out_err:
	perf_evlist__munmap(evlist);
out_free_cpu_mask:
	CPU_FREE(cpu_mask);
out_delete_evlist:
	perf_evlist__delete(evlist);
out:
	return (err < 0 || errs > 0) ? -1 : 0;
}

#if defined(__x86_64__) || defined(__i386__)

#define barrier() asm volatile("" ::: "memory")

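/*
 * rdpmc reads performance counter 'counter' (rdtsc the timestamp counter);
 * both return their result in edx:eax, combined here into a single u64.
 */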
static u64 rdpmc(unsigned int counter)
{
	unsigned int low, high;

	asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));

	return low | ((u64)high) << 32;
}

static u64 rdtsc(void)
{
	unsigned int low, high;

	asm volatile("rdtsc" : "=a" (low), "=d" (high));

	return low | ((u64)high) << 32;
}

static u64 mmap_read_self(void *addr)
{
	struct perf_event_mmap_page *pc = addr;
	u32 seq, idx, time_mult = 0, time_shift = 0;
	u64 count, cyc = 0, time_offset = 0, enabled, running, delta;

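	/*
	 * pc->lock is a seqcount: if it changed while we read the fields
	 * below, the kernel updated the page mid-read, so retry.
	 */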
	do {
		seq = pc->lock;
		barrier();

		enabled = pc->time_enabled;
		running = pc->time_running;

		if (enabled != running) {
			cyc = rdtsc();
			time_mult = pc->time_mult;
			time_shift = pc->time_shift;
			time_offset = pc->time_offset;
		}

		idx = pc->index;
		count = pc->offset;
		if (idx)
			count += rdpmc(idx - 1);

		barrier();
	} while (pc->lock != seq);

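	/*
	 * enabled != running means the event was time-multiplexed, so the
	 * raw count has to be extrapolated: bring enabled/running up to the
	 * current TSC using time = time_offset + (tsc * time_mult) >>
	 * time_shift (split into quotient and remainder to avoid 64-bit
	 * overflow), then scale the count by enabled/running.
	 */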
	if (enabled != running) {
		u64 quot, rem;

		quot = (cyc >> time_shift);
		rem = cyc & ((1 << time_shift) - 1);
		delta = time_offset + quot * time_mult +
			((rem * time_mult) >> time_shift);

		enabled += delta;
		if (idx)
			running += delta;

		quot = count / running;
		rem = count % running;
		count = quot * enabled + (rem * enabled) / running;
	}

	return count;
}

/*
 * If the RDPMC instruction faults then signal this back to the test parent task:
 */
static void segfault_handler(int sig __maybe_unused,
			     siginfo_t *info __maybe_unused,
			     void *uc __maybe_unused)
{
	exit(-1);
}

static int __test__rdpmc(void)
{
	long page_size = sysconf(_SC_PAGE_SIZE);
	volatile int tmp = 0;
	u64 i, loops = 1000;
	int n;
	int fd;
	void *addr;
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_INSTRUCTIONS,
		.exclude_kernel = 1,
	};
	u64 delta_sum = 0;
	struct sigaction sa;

	sigfillset(&sa.sa_mask);
	sa.sa_flags = SA_SIGINFO;
	sa.sa_sigaction = segfault_handler;
	sigaction(SIGSEGV, &sa, NULL);

	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		pr_debug("Error: sys_perf_event_open() syscall returned "
			 "with %d (%s)\n", fd, strerror(errno));
		return -1;
	}

	addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
	if (addr == (void *)(-1)) {
		pr_debug("Error: mmap() syscall returned with (%s)\n",
			 strerror(errno));
		goto out_close;
	}

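	/*
	 * Sample the counter around six busy loops, each 10x longer than the
	 * previous one; if the deltas sum to zero, user space counter access
	 * via rdpmc is not working.
	 */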
	for (n = 0; n < 6; n++) {
		u64 stamp, now, delta;

		stamp = mmap_read_self(addr);

		for (i = 0; i < loops; i++)
			tmp++;

		now = mmap_read_self(addr);
		loops *= 10;

		delta = now - stamp;
		pr_debug("%14d: %14Lu\n", n, (long long)delta);

		delta_sum += delta;
	}

	munmap(addr, page_size);
	pr_debug(" ");
out_close:
	close(fd);

	if (!delta_sum)
		return -1;

	return 0;
}

static int test__rdpmc(void)
{
	int status = 0;
	int wret = 0;
	int ret;
	int pid;

	pid = fork();
	if (pid < 0)
		return -1;

	if (!pid) {
		ret = __test__rdpmc();

		exit(ret);
	}

	wret = waitpid(pid, &status, 0);
	if (wret < 0 || status)
		return -1;

	return 0;
}

#endif

static int test__perf_pmu(void)
{
	return perf_pmu__test();
}

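/*
 * Generate every valid hardware cache event name (type x op x result),
 * feed each one through the event parser, then walk the resulting evlist
 * checking that perf_evsel__name() reproduces the original string.
 */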
static int perf_evsel__roundtrip_cache_name_test(void)
{
	char name[128];
	int type, op, err = 0, ret = 0, i, idx;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);

	if (evlist == NULL)
		return -ENOMEM;

	for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
		for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
			/* skip invalid cache type */
			if (!perf_evsel__is_cache_op_valid(type, op))
				continue;

			for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
				__perf_evsel__hw_cache_type_op_res_name(type, op, i,
									name, sizeof(name));
				err = parse_events(evlist, name, 0);
				if (err)
					ret = err;
			}
		}
	}

	idx = 0;
	evsel = perf_evlist__first(evlist);

	for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
		for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
			/* skip invalid cache type */
			if (!perf_evsel__is_cache_op_valid(type, op))
				continue;

			for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
				__perf_evsel__hw_cache_type_op_res_name(type, op, i,
									name, sizeof(name));
				if (evsel->idx != idx)
					continue;

				++idx;

				if (strcmp(perf_evsel__name(evsel), name)) {
					pr_debug("%s != %s\n", perf_evsel__name(evsel), name);
					ret = -1;
				}

				evsel = perf_evsel__next(evsel);
			}
		}
	}

	perf_evlist__delete(evlist);
	return ret;
}

static int __perf_evsel__name_array_test(const char *names[], int nr_names)
{
	int i, err;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);

	if (evlist == NULL)
		return -ENOMEM;

	for (i = 0; i < nr_names; ++i) {
		err = parse_events(evlist, names[i], 0);
		if (err) {
			pr_debug("failed to parse event '%s', err %d\n",
				 names[i], err);
			goto out_delete_evlist;
		}
	}

	err = 0;
	list_for_each_entry(evsel, &evlist->entries, node) {
		if (strcmp(perf_evsel__name(evsel), names[evsel->idx])) {
			--err;
			pr_debug("%s != %s\n", perf_evsel__name(evsel), names[evsel->idx]);
		}
	}

out_delete_evlist:
	perf_evlist__delete(evlist);
	return err;
}

#define perf_evsel__name_array_test(names) \
	__perf_evsel__name_array_test(names, ARRAY_SIZE(names))

static int perf_evsel__roundtrip_name_test(void)
{
	int err = 0, ret = 0;

	err = perf_evsel__name_array_test(perf_evsel__hw_names);
	if (err)
		ret = err;

	err = perf_evsel__name_array_test(perf_evsel__sw_names);
	if (err)
		ret = err;

	err = perf_evsel__roundtrip_cache_name_test();
	if (err)
		ret = err;

	return ret;
}

static struct test {
	const char *desc;
	int (*func)(void);
} tests[] = {
	{
		.desc = "vmlinux symtab matches kallsyms",
		.func = test__vmlinux_matches_kallsyms,
	},
	{
		.desc = "detect open syscall event",
		.func = test__open_syscall_event,
	},
	{
		.desc = "detect open syscall event on all cpus",
		.func = test__open_syscall_event_on_all_cpus,
	},
	{
		.desc = "read samples using the mmap interface",
		.func = test__basic_mmap,
	},
	{
		.desc = "parse events tests",
		.func = parse_events__test,
	},
#if defined(__x86_64__) || defined(__i386__)
	{
		.desc = "x86 rdpmc test",
		.func = test__rdpmc,
	},
#endif
	{
		.desc = "Validate PERF_RECORD_* events & perf_sample fields",
		.func = test__PERF_RECORD,
	},
	{
		.desc = "Test perf pmu format parsing",
		.func = test__perf_pmu,
	},
	{
		.desc = "Test dso data interface",
		.func = dso__test_data,
	},
	{
		.desc = "roundtrip evsel->name check",
		.func = perf_evsel__roundtrip_name_test,
	},
	{
		.func = NULL,
	},
};

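/*
 * With no arguments every test matches; a purely numeric argument selects
 * a test by its 1-based position in tests[], anything else is matched as
 * a substring of the test description.
 */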
static bool perf_test__matches(int curr, int argc, const char *argv[])
{
	int i;

	if (argc == 0)
		return true;

	for (i = 0; i < argc; ++i) {
		char *end;
		long nr = strtoul(argv[i], &end, 10);

		if (*end == '\0') {
			if (nr == curr + 1)
				return true;
			continue;
		}

		if (strstr(tests[curr].desc, argv[i]))
			return true;
	}

	return false;
}

static int __cmd_test(int argc, const char *argv[])
{
	int i = 0;

	while (tests[i].func) {
		int curr = i++, err;

		if (!perf_test__matches(curr, argc, argv))
			continue;

		pr_info("%2d: %s:", i, tests[curr].desc);
		pr_debug("\n--- start ---\n");
		err = tests[curr].func();
		pr_debug("---- end ----\n%s:", tests[curr].desc);
		pr_info(" %s\n", err ? "FAILED!" : "Ok");
	}

	return 0;
}

static int perf_test__list(int argc, const char **argv)
{
	int i = 0;

	while (tests[i].func) {
		int curr = i++;

		if (argc > 1 && !strstr(tests[curr].desc, argv[1]))
			continue;

		pr_info("%2d: %s\n", i, tests[curr].desc);
	}

	return 0;
}

int cmd_test(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const char * const test_usage[] = {
	"perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
	NULL,
	};
	const struct option test_options[] = {
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show symbol address, etc)"),
	OPT_END()
	};

	argc = parse_options(argc, argv, test_options, test_usage, 0);
	if (argc >= 1 && !strcmp(argv[0], "list"))
		return perf_test__list(argc, argv);

	symbol_conf.priv_size = sizeof(int);
	symbol_conf.sort_by_name = true;
	symbol_conf.try_vmlinux_path = true;

	if (symbol__init() < 0)
		return -1;

	return __cmd_test(argc, argv);
}