perf report: Split out event processing helpers
Documentation/perf_counter/builtin-report.c
1 /*
2 * builtin-report.c
3 *
4 * Builtin report command: Analyze the perf.data input file,
5 * look up and read DSOs and symbol information and display
6 * a histogram of results, along various sorting keys.
7 */
8 #include "builtin.h"
9
10 #include "util/util.h"
11
12 #include "util/list.h"
13 #include "util/cache.h"
14 #include "util/rbtree.h"
15 #include "util/symbol.h"
16 #include "util/string.h"
17
18 #include "perf.h"
19
20 #include "util/parse-options.h"
21 #include "util/parse-events.h"
22
23 #define SHOW_KERNEL 1
24 #define SHOW_USER 2
25 #define SHOW_HV 4
26
27 static char const *input_name = "perf.data";
28 static char *vmlinux = NULL;
29 static char *sort_order = "comm,dso";
30 static int input;
31 static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV;
32
33 static int dump_trace = 0;
34 #define dprintf(x...) do { if (dump_trace) printf(x); } while (0)
35
36 static int verbose;
37 static int full_paths;
38
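/*
 * perf.data is not read() in one go: __cmd_report() maps it read-only in
 * sliding windows of mmap_window pages and parses the records in place.
 */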
39 static unsigned long page_size;
40 static unsigned long mmap_window = 32;
41
42 const char *perf_event_names[] = {
43 [PERF_EVENT_MMAP] = " PERF_EVENT_MMAP",
44 [PERF_EVENT_MUNMAP] = " PERF_EVENT_MUNMAP",
45 [PERF_EVENT_COMM] = " PERF_EVENT_COMM",
46 };
47
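/*
 * Record layouts as they appear in perf.data; the event_t union below
 * lets us read the common header first and then view the record as the
 * right type.
 */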
48 struct ip_event {
49 struct perf_event_header header;
50 __u64 ip;
51 __u32 pid, tid;
52 };
53
54 struct mmap_event {
55 struct perf_event_header header;
56 __u32 pid, tid;
57 __u64 start;
58 __u64 len;
59 __u64 pgoff;
60 char filename[PATH_MAX];
61 };
62
63 struct comm_event {
64 struct perf_event_header header;
65 __u32 pid, tid;
66 char comm[16];
67 };
68
69 typedef union event_union {
70 struct perf_event_header header;
71 struct ip_event ip;
72 struct mmap_event mmap;
73 struct comm_event comm;
74 } event_t;
75
76 static LIST_HEAD(dsos);
77 static struct dso *kernel_dso;
78
79 static void dsos__add(struct dso *dso)
80 {
81 list_add_tail(&dso->node, &dsos);
82 }
83
84 static struct dso *dsos__find(const char *name)
85 {
86 struct dso *pos;
87
88 list_for_each_entry(pos, &dsos, node)
89 if (strcmp(pos->name, name) == 0)
90 return pos;
91 return NULL;
92 }
93
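/*
 * Look up a DSO by name, creating it and loading its symbols on first
 * use; returns NULL if the object cannot be created or opened.
 */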
94 static struct dso *dsos__findnew(const char *name)
95 {
96 struct dso *dso = dsos__find(name);
97 int nr;
98
99 if (dso)
100 return dso;
101
102 dso = dso__new(name, 0);
103 if (!dso)
104 goto out_delete_dso;
105
106 nr = dso__load(dso, NULL);
107 if (nr < 0) {
108 fprintf(stderr, "Failed to open: %s\n", name);
109 goto out_delete_dso;
110 }
111 if (!nr && verbose) {
112 fprintf(stderr,
113 "No symbols found in: %s, maybe install a debug package?\n",
114 name);
115 }
116
117 dsos__add(dso);
118
119 return dso;
120
121 out_delete_dso:
122 dso__delete(dso);
123 return NULL;
124 }
125
126 static void dsos__fprintf(FILE *fp)
127 {
128 struct dso *pos;
129
130 list_for_each_entry(pos, &dsos, node)
131 dso__fprintf(pos, fp);
132 }
133
134 static int load_kernel(void)
135 {
136 int err;
137
138 kernel_dso = dso__new("[kernel]", 0);
139 if (!kernel_dso)
140 return -1;
141
142 err = dso__load_kernel(kernel_dso, vmlinux, NULL);
143 if (err) {
144 dso__delete(kernel_dso);
145 kernel_dso = NULL;
146 } else
147 dsos__add(kernel_dso);
148
149 return err;
150 }
151
152 static char __cwd[PATH_MAX];
153 static char *cwd = __cwd;
154 static int cwdlen;
155
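/*
 * Length of the common prefix of 'pathname' and the current working
 * directory; map__new() uses it to shorten mmap filenames to a
 * ./relative form unless --full-paths was given.
 */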
156 static int strcommon(const char *pathname)
157 {
158 int n = 0;
159
160 	while (n < cwdlen && pathname[n] == cwd[n])
161 ++n;
162
163 return n;
164 }
165
166 struct map {
167 struct list_head node;
168 uint64_t start;
169 uint64_t end;
170 uint64_t pgoff;
171 struct dso *dso;
172 };
173
174 static struct map *map__new(struct mmap_event *event)
175 {
176 struct map *self = malloc(sizeof(*self));
177
178 if (self != NULL) {
179 const char *filename = event->filename;
180 char newfilename[PATH_MAX];
181
182 if (cwd) {
183 int n = strcommon(filename);
184
185 if (n == cwdlen) {
186 snprintf(newfilename, sizeof(newfilename),
187 ".%s", filename + n);
188 filename = newfilename;
189 }
190 }
191
192 self->start = event->start;
193 self->end = event->start + event->len;
194 self->pgoff = event->pgoff;
195
196 self->dso = dsos__findnew(filename);
197 if (self->dso == NULL)
198 goto out_delete;
199 }
200 return self;
201 out_delete:
202 free(self);
203 return NULL;
204 }
205
206 struct thread;
207
208 struct thread {
209 struct rb_node rb_node;
210 struct list_head maps;
211 pid_t pid;
212 char *comm;
213 };
214
215 static struct thread *thread__new(pid_t pid)
216 {
217 struct thread *self = malloc(sizeof(*self));
218
219 if (self != NULL) {
220 self->pid = pid;
221 self->comm = malloc(32);
222 if (self->comm)
223 snprintf(self->comm, 32, ":%d", self->pid);
224 INIT_LIST_HEAD(&self->maps);
225 }
226
227 return self;
228 }
229
230 static int thread__set_comm(struct thread *self, const char *comm)
231 {
232 if (self->comm)
233 free(self->comm);
234 self->comm = strdup(comm);
235 return self->comm ? 0 : -ENOMEM;
236 }
237
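/* All threads seen so far, indexed by pid in an rbtree. */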
238 static struct rb_root threads;
239 static struct thread *last_match;
240
241 static struct thread *threads__findnew(pid_t pid)
242 {
243 struct rb_node **p = &threads.rb_node;
244 struct rb_node *parent = NULL;
245 struct thread *th;
246
247 /*
248 	 * Front-end cache - PID lookups come in blocks,
249 	 * so most of the time we don't have to look up
250 * the full rbtree:
251 */
252 if (last_match && last_match->pid == pid)
253 return last_match;
254
255 while (*p != NULL) {
256 parent = *p;
257 th = rb_entry(parent, struct thread, rb_node);
258
259 if (th->pid == pid) {
260 last_match = th;
261 return th;
262 }
263
264 if (pid < th->pid)
265 p = &(*p)->rb_left;
266 else
267 p = &(*p)->rb_right;
268 }
269
270 th = thread__new(pid);
271 if (th != NULL) {
272 rb_link_node(&th->rb_node, parent, p);
273 rb_insert_color(&th->rb_node, &threads);
274 last_match = th;
275 }
276
277 return th;
278 }
279
280 static void thread__insert_map(struct thread *self, struct map *map)
281 {
282 list_add_tail(&map->node, &self->maps);
283 }
284
285 static struct map *thread__find_map(struct thread *self, uint64_t ip)
286 {
287 struct map *pos;
288
289 if (self == NULL)
290 return NULL;
291
292 list_for_each_entry(pos, &self->maps, node)
293 if (ip >= pos->start && ip <= pos->end)
294 return pos;
295
296 return NULL;
297 }
298
299 /*
300 * histogram, sorted on item, collects counts
301 */
302
303 static struct rb_root hist;
304
305 struct hist_entry {
306 struct rb_node rb_node;
307
308 struct thread *thread;
309 struct map *map;
310 struct dso *dso;
311 struct symbol *sym;
312 uint64_t ip;
313 char level;
314
315 uint32_t count;
316 };
317
318 /*
319 * configurable sorting bits
320 */
321
322 struct sort_entry {
323 struct list_head list;
324
325 char *header;
326
327 int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
328 int64_t (*collapse)(struct hist_entry *, struct hist_entry *);
329 size_t (*print)(FILE *fp, struct hist_entry *);
330 };
331
332 /* --sort pid */
333
334 static int64_t
335 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
336 {
337 return right->thread->pid - left->thread->pid;
338 }
339
340 static size_t
341 sort__thread_print(FILE *fp, struct hist_entry *self)
342 {
343 return fprintf(fp, " %16s:%5d", self->thread->comm ?: "", self->thread->pid);
344 }
345
346 static struct sort_entry sort_thread = {
347 .header = " Command: Pid ",
348 .cmp = sort__thread_cmp,
349 .print = sort__thread_print,
350 };
351
352 /* --sort comm */
353
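/*
 * For insertion we only compare pids, which is cheap; entries that share
 * a comm are merged later by sort__comm_collapse() via collapse__resort().
 */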
354 static int64_t
355 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
356 {
357 return right->thread->pid - left->thread->pid;
358 }
359
360 static int64_t
361 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
362 {
363 char *comm_l = left->thread->comm;
364 char *comm_r = right->thread->comm;
365
366 if (!comm_l || !comm_r) {
367 if (!comm_l && !comm_r)
368 return 0;
369 else if (!comm_l)
370 return -1;
371 else
372 return 1;
373 }
374
375 return strcmp(comm_l, comm_r);
376 }
377
378 static size_t
379 sort__comm_print(FILE *fp, struct hist_entry *self)
380 {
381 return fprintf(fp, " %16s", self->thread->comm);
382 }
383
384 static struct sort_entry sort_comm = {
385 .header = " Command",
386 .cmp = sort__comm_cmp,
387 .collapse = sort__comm_collapse,
388 .print = sort__comm_print,
389 };
390
391 /* --sort dso */
392
393 static int64_t
394 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
395 {
396 struct dso *dso_l = left->dso;
397 struct dso *dso_r = right->dso;
398
399 if (!dso_l || !dso_r) {
400 if (!dso_l && !dso_r)
401 return 0;
402 else if (!dso_l)
403 return -1;
404 else
405 return 1;
406 }
407
408 return strcmp(dso_l->name, dso_r->name);
409 }
410
411 static size_t
412 sort__dso_print(FILE *fp, struct hist_entry *self)
413 {
414 if (self->dso)
415 return fprintf(fp, " %-25s", self->dso->name);
416
417 return fprintf(fp, " %016llx", (__u64)self->ip);
418 }
419
420 static struct sort_entry sort_dso = {
421 .header = " Shared Object ",
422 .cmp = sort__dso_cmp,
423 .print = sort__dso_print,
424 };
425
426 /* --sort symbol */
427
428 static int64_t
429 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
430 {
431 uint64_t ip_l, ip_r;
432
433 if (left->sym == right->sym)
434 return 0;
435
436 ip_l = left->sym ? left->sym->start : left->ip;
437 ip_r = right->sym ? right->sym->start : right->ip;
438
439 return (int64_t)(ip_r - ip_l);
440 }
441
442 static size_t
443 sort__sym_print(FILE *fp, struct hist_entry *self)
444 {
445 size_t ret = 0;
446
447 if (verbose)
448 ret += fprintf(fp, " %#018llx", (__u64)self->ip);
449
450 if (self->dso)
451 ret += fprintf(fp, " %s: ", self->dso->name);
452 else
453 ret += fprintf(fp, " %#016llx: ", (__u64)self->ip);
454
455 if (self->sym)
456 ret += fprintf(fp, "%s", self->sym->name);
457 else
458 ret += fprintf(fp, "%#016llx", (__u64)self->ip);
459
460 return ret;
461 }
462
463 static struct sort_entry sort_sym = {
464 .header = " Shared Object: Symbol",
465 .cmp = sort__sym_cmp,
466 .print = sort__sym_print,
467 };
468
469 static int sort__need_collapse = 0;
470
471 struct sort_dimension {
472 char *name;
473 struct sort_entry *entry;
474 int taken;
475 };
476
477 static struct sort_dimension sort_dimensions[] = {
478 { .name = "pid", .entry = &sort_thread, },
479 { .name = "comm", .entry = &sort_comm, },
480 { .name = "dso", .entry = &sort_dso, },
481 { .name = "symbol", .entry = &sort_sym, },
482 };
483
484 static LIST_HEAD(hist_entry__sort_list);
485
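/*
 * Match a --sort token against the table above.  Prefix matches are
 * accepted ("sym" selects "symbol") and the chosen keys are chained in
 * the order they appear on the command line.
 */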
486 static int sort_dimension__add(char *tok)
487 {
488 int i;
489
490 for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) {
491 struct sort_dimension *sd = &sort_dimensions[i];
492
493 if (sd->taken)
494 continue;
495
496 if (strncasecmp(tok, sd->name, strlen(tok)))
497 continue;
498
499 if (sd->entry->collapse)
500 sort__need_collapse = 1;
501
502 list_add_tail(&sd->entry->list, &hist_entry__sort_list);
503 sd->taken = 1;
504
505 return 0;
506 }
507
508 return -ESRCH;
509 }
510
511 static int64_t
512 hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
513 {
514 struct sort_entry *se;
515 int64_t cmp = 0;
516
517 list_for_each_entry(se, &hist_entry__sort_list, list) {
518 cmp = se->cmp(left, right);
519 if (cmp)
520 break;
521 }
522
523 return cmp;
524 }
525
526 static int64_t
527 hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
528 {
529 struct sort_entry *se;
530 int64_t cmp = 0;
531
532 list_for_each_entry(se, &hist_entry__sort_list, list) {
533 int64_t (*f)(struct hist_entry *, struct hist_entry *);
534
535 f = se->collapse ?: se->cmp;
536
537 cmp = f(left, right);
538 if (cmp)
539 break;
540 }
541
542 return cmp;
543 }
544
545 static size_t
546 hist_entry__fprintf(FILE *fp, struct hist_entry *self, uint64_t total_samples)
547 {
548 struct sort_entry *se;
549 size_t ret;
550
551 if (total_samples) {
552 ret = fprintf(fp, " %6.2f%%",
553 (self->count * 100.0) / total_samples);
554 } else
555 ret = fprintf(fp, "%12d ", self->count);
556
557 list_for_each_entry(se, &hist_entry__sort_list, list)
558 ret += se->print(fp, self);
559
560 ret += fprintf(fp, "\n");
561
562 return ret;
563 }
564
565 /*
566 * collect histogram counts
567 */
568
569 static int
570 hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
571 struct symbol *sym, uint64_t ip, char level)
572 {
573 struct rb_node **p = &hist.rb_node;
574 struct rb_node *parent = NULL;
575 struct hist_entry *he;
576 struct hist_entry entry = {
577 .thread = thread,
578 .map = map,
579 .dso = dso,
580 .sym = sym,
581 .ip = ip,
582 .level = level,
583 .count = 1,
584 };
585 int cmp;
586
587 while (*p != NULL) {
588 parent = *p;
589 he = rb_entry(parent, struct hist_entry, rb_node);
590
591 cmp = hist_entry__cmp(&entry, he);
592
593 if (!cmp) {
594 he->count++;
595 return 0;
596 }
597
598 if (cmp < 0)
599 p = &(*p)->rb_left;
600 else
601 p = &(*p)->rb_right;
602 }
603
604 he = malloc(sizeof(*he));
605 if (!he)
606 return -ENOMEM;
607 *he = entry;
608 rb_link_node(&he->rb_node, parent, p);
609 rb_insert_color(&he->rb_node, &hist);
610
611 return 0;
612 }
613
614 static void hist_entry__free(struct hist_entry *he)
615 {
616 free(he);
617 }
618
619 /*
620 * collapse the histogram
621 */
622
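/*
 * Entries that compare equal under the collapse rules (e.g. same comm but
 * different pid) are merged here and their counts summed before the final
 * sort by overhead.
 */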
623 static struct rb_root collapse_hists;
624
625 static void collapse__insert_entry(struct hist_entry *he)
626 {
627 struct rb_node **p = &collapse_hists.rb_node;
628 struct rb_node *parent = NULL;
629 struct hist_entry *iter;
630 int64_t cmp;
631
632 while (*p != NULL) {
633 parent = *p;
634 iter = rb_entry(parent, struct hist_entry, rb_node);
635
636 cmp = hist_entry__collapse(iter, he);
637
638 if (!cmp) {
639 iter->count += he->count;
640 hist_entry__free(he);
641 return;
642 }
643
644 if (cmp < 0)
645 p = &(*p)->rb_left;
646 else
647 p = &(*p)->rb_right;
648 }
649
650 rb_link_node(&he->rb_node, parent, p);
651 rb_insert_color(&he->rb_node, &collapse_hists);
652 }
653
654 static void collapse__resort(void)
655 {
656 struct rb_node *next;
657 struct hist_entry *n;
658
659 if (!sort__need_collapse)
660 return;
661
662 next = rb_first(&hist);
663 while (next) {
664 n = rb_entry(next, struct hist_entry, rb_node);
665 next = rb_next(&n->rb_node);
666
667 rb_erase(&n->rb_node, &hist);
668 collapse__insert_entry(n);
669 }
670 }
671
672 /*
673 * reverse the map, sort on count.
674 */
675
676 static struct rb_root output_hists;
677
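/*
 * The output tree is keyed on count, largest first, so the in-order walk
 * in output__fprintf() prints the hottest entries at the top.
 */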
678 static void output__insert_entry(struct hist_entry *he)
679 {
680 struct rb_node **p = &output_hists.rb_node;
681 struct rb_node *parent = NULL;
682 struct hist_entry *iter;
683
684 while (*p != NULL) {
685 parent = *p;
686 iter = rb_entry(parent, struct hist_entry, rb_node);
687
688 if (he->count > iter->count)
689 p = &(*p)->rb_left;
690 else
691 p = &(*p)->rb_right;
692 }
693
694 rb_link_node(&he->rb_node, parent, p);
695 rb_insert_color(&he->rb_node, &output_hists);
696 }
697
698 static void output__resort(void)
699 {
700 struct rb_node *next;
701 	struct hist_entry *n;
702 	struct rb_root *tree = &hist;
703
704 	if (sort__need_collapse)
705 		tree = &collapse_hists;
706
707 	next = rb_first(tree);
708 	while (next) {
709 		n = rb_entry(next, struct hist_entry, rb_node);
710 		next = rb_next(&n->rb_node);
711
712 		rb_erase(&n->rb_node, tree);
713 output__insert_entry(n);
714 }
715 }
716
717 static size_t output__fprintf(FILE *fp, uint64_t total_samples)
718 {
719 struct hist_entry *pos;
720 struct sort_entry *se;
721 struct rb_node *nd;
722 size_t ret = 0;
723
724 fprintf(fp, "#\n");
725
726 fprintf(fp, "# Overhead");
727 list_for_each_entry(se, &hist_entry__sort_list, list)
728 fprintf(fp, " %s", se->header);
729 fprintf(fp, "\n");
730
731 fprintf(fp, "# ........");
732 list_for_each_entry(se, &hist_entry__sort_list, list) {
733 int i;
734
735 fprintf(fp, " ");
736 for (i = 0; i < strlen(se->header)-1; i++)
737 fprintf(fp, ".");
738 }
739 fprintf(fp, "\n");
740
741 fprintf(fp, "#\n");
742
743 for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) {
744 pos = rb_entry(nd, struct hist_entry, rb_node);
745 ret += hist_entry__fprintf(fp, pos, total_samples);
746 }
747
748 return ret;
749 }
750
751 static void register_idle_thread(void)
752 {
753 struct thread *thread = threads__findnew(0);
754
755 if (thread == NULL ||
756 thread__set_comm(thread, "[idle]")) {
757 fprintf(stderr, "problem inserting idle task.\n");
758 exit(-1);
759 }
760 }
761
762 static unsigned long total = 0, total_mmap = 0, total_comm = 0, total_unknown = 0;
763
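/*
 * A sample ("overflow") record: resolve the instruction pointer to a
 * thread, map, DSO and symbol, then account it in the histogram.
 */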
764 static int
765 process_overflow_event(event_t *event, unsigned long offset, unsigned long head)
766 {
767 char level;
768 int show = 0;
769 struct dso *dso = NULL;
770 struct thread *thread = threads__findnew(event->ip.pid);
771 uint64_t ip = event->ip.ip;
772 struct map *map = NULL;
773
774 dprintf("%p [%p]: PERF_EVENT (IP, %d): %d: %p\n",
775 (void *)(offset + head),
776 (void *)(long)(event->header.size),
777 event->header.misc,
778 event->ip.pid,
779 (void *)(long)ip);
780
781 	if (thread == NULL) {
782 		fprintf(stderr, "problem processing %d event, skipping it.\n",
783 			event->header.type);
784 		return -1;
785 	}
786
787 	dprintf(" ... thread: %s:%d\n", thread->comm, thread->pid);
788
789 if (event->header.misc & PERF_EVENT_MISC_KERNEL) {
790 show = SHOW_KERNEL;
791 level = 'k';
792
793 dso = kernel_dso;
794
795 dprintf(" ...... dso: %s\n", dso->name);
796
797 } else if (event->header.misc & PERF_EVENT_MISC_USER) {
798
799 show = SHOW_USER;
800 level = '.';
801
802 map = thread__find_map(thread, ip);
803 if (map != NULL) {
804 dso = map->dso;
805 ip -= map->start + map->pgoff;
806 } else {
807 /*
808 * If this is outside of all known maps,
809 * and is a negative address, try to look it
810 * up in the kernel dso, as it might be a
811 * vsyscall (which executes in user-mode):
812 */
813 if ((long long)ip < 0)
814 dso = kernel_dso;
815 }
816 dprintf(" ...... dso: %s\n", dso ? dso->name : "<not found>");
817
818 } else {
819 show = SHOW_HV;
820 level = 'H';
821 dprintf(" ...... dso: [hypervisor]\n");
822 }
823
824 if (show & show_mask) {
825 struct symbol *sym = dso__find_symbol(dso, ip);
826
827 if (hist_entry__add(thread, map, dso, sym, ip, level)) {
828 fprintf(stderr,
829 "problem incrementing symbol count, skipping event\n");
830 return -1;
831 }
832 }
833 total++;
834
835 return 0;
836 }
837
838 static int
839 process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
840 {
841 struct thread *thread = threads__findnew(event->mmap.pid);
842 struct map *map = map__new(&event->mmap);
843
844 dprintf("%p [%p]: PERF_EVENT_MMAP: [%p(%p) @ %p]: %s\n",
845 (void *)(offset + head),
846 (void *)(long)(event->header.size),
847 (void *)(long)event->mmap.start,
848 (void *)(long)event->mmap.len,
849 (void *)(long)event->mmap.pgoff,
850 event->mmap.filename);
851
852 if (thread == NULL || map == NULL) {
853 dprintf("problem processing PERF_EVENT_MMAP, skipping event.\n");
854 return -1;
855 }
856
857 thread__insert_map(thread, map);
858 total_mmap++;
859
860 return 0;
861 }
862
863 static int
864 process_comm_event(event_t *event, unsigned long offset, unsigned long head)
865 {
866 struct thread *thread = threads__findnew(event->comm.pid);
867
868 dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
869 (void *)(offset + head),
870 (void *)(long)(event->header.size),
871 event->comm.comm, event->comm.pid);
872
873 if (thread == NULL ||
874 thread__set_comm(thread, event->comm.comm)) {
875 dprintf("problem processing PERF_EVENT_COMM, skipping event.\n");
876 return -1;
877 }
878 total_comm++;
879
880 return 0;
881 }
882
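/*
 * Dispatch one record: samples are flagged via PERF_EVENT_MISC_OVERFLOW
 * in header.misc, everything else is keyed on header.type.
 */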
883 static int
884 process_event(event_t *event, unsigned long offset, unsigned long head)
885 {
886 if (event->header.misc & PERF_EVENT_MISC_OVERFLOW)
887 return process_overflow_event(event, offset, head);
888
889 switch (event->header.type) {
890 case PERF_EVENT_MMAP:
891 return process_mmap_event(event, offset, head);
892
893 case PERF_EVENT_COMM:
894 return process_comm_event(event, offset, head);
895
896 default:
897 return -1;
898 }
899
900 return 0;
901 }
902
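/*
 * Read the whole input file: mmap it in mmap_window-page chunks, feed
 * every record to process_event(), then collapse, resort and print the
 * histogram.
 */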
903 static int __cmd_report(void)
904 {
905 int ret, rc = EXIT_FAILURE;
906 unsigned long offset = 0;
907 unsigned long head = 0;
908 struct stat stat;
909 event_t *event;
910 uint32_t size;
911 char *buf;
912
913 register_idle_thread();
914
915 input = open(input_name, O_RDONLY);
916 if (input < 0) {
917 perror("failed to open file");
918 exit(-1);
919 }
920
921 ret = fstat(input, &stat);
922 if (ret < 0) {
923 perror("failed to stat file");
924 exit(-1);
925 }
926
927 if (!stat.st_size) {
928 fprintf(stderr, "zero-sized file, nothing to do!\n");
929 exit(0);
930 }
931
932 if (load_kernel() < 0) {
933 perror("failed to load kernel symbols");
934 return EXIT_FAILURE;
935 }
936
937 if (!full_paths) {
938 if (getcwd(__cwd, sizeof(__cwd)) == NULL) {
939 perror("failed to get the current directory");
940 return EXIT_FAILURE;
941 }
942 cwdlen = strlen(cwd);
943 } else {
944 cwd = NULL;
945 cwdlen = 0;
946 }
947 remap:
948 buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
949 MAP_SHARED, input, offset);
950 if (buf == MAP_FAILED) {
951 perror("failed to mmap file");
952 exit(-1);
953 }
954
955 more:
956 event = (event_t *)(buf + head);
957
958 size = event->header.size;
959 if (!size)
960 size = 8;
961
962 if (head + event->header.size >= page_size * mmap_window) {
963 unsigned long shift = page_size * (head / page_size);
964 int ret;
965
966 ret = munmap(buf, page_size * mmap_window);
967 assert(ret == 0);
968
969 offset += shift;
970 head -= shift;
971 goto remap;
972 }
973
974 size = event->header.size;
975
976 if (!size || process_event(event, offset, head) < 0) {
977
978 dprintf("%p [%p]: skipping unknown header type: %d\n",
979 (void *)(offset + head),
980 (void *)(long)(event->header.size),
981 event->header.type);
982
983 total_unknown++;
984
985 /*
986 * assume we lost track of the stream, check alignment, and
987 * increment a single u64 in the hope to catch on again 'soon'.
988 */
989
990 if (unlikely(head & 7))
991 head &= ~7ULL;
992
993 size = 8;
994 }
995
996 head += size;
997
998 if (offset + head < stat.st_size)
999 goto more;
1000
1001 rc = EXIT_SUCCESS;
1002 close(input);
1003
1004 dprintf(" IP events: %10ld\n", total);
1005 dprintf(" mmap events: %10ld\n", total_mmap);
1006 dprintf(" comm events: %10ld\n", total_comm);
1007 dprintf(" unknown events: %10ld\n", total_unknown);
1008
1009 if (dump_trace)
1010 return 0;
1011
1012 if (verbose >= 2)
1013 dsos__fprintf(stdout);
1014
1015 collapse__resort();
1016 output__resort();
1017 output__fprintf(stdout, total);
1018
1019 return rc;
1020 }
1021
1022 static const char * const report_usage[] = {
1023 "perf report [<options>] <command>",
1024 NULL
1025 };
1026
1027 static const struct option options[] = {
1028 OPT_STRING('i', "input", &input_name, "file",
1029 "input file name"),
1030 OPT_BOOLEAN('v', "verbose", &verbose,
1031 "be more verbose (show symbol address, etc)"),
1032 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
1033 "dump raw trace in ASCII"),
1034 OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
1035 OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
1036 		   "sort by key(s): pid, comm, dso, symbol. Default: comm,dso"),
1037 OPT_BOOLEAN('P', "full-paths", &full_paths,
1038 "Don't shorten the pathnames taking into account the cwd"),
1039 OPT_END()
1040 };
1041
1042 static void setup_sorting(void)
1043 {
1044 char *tmp, *tok, *str = strdup(sort_order);
1045
1046 for (tok = strtok_r(str, ", ", &tmp);
1047 tok; tok = strtok_r(NULL, ", ", &tmp)) {
1048 if (sort_dimension__add(tok) < 0) {
1049 error("Unknown --sort key: `%s'", tok);
1050 usage_with_options(report_usage, options);
1051 }
1052 }
1053
1054 free(str);
1055 }
1056
1057 int cmd_report(int argc, const char **argv, const char *prefix)
1058 {
1059 symbol__init();
1060
1061 page_size = getpagesize();
1062
1063 parse_options(argc, argv, options, report_usage, 0);
1064
1065 setup_sorting();
1066
1067 setup_pager();
1068
1069 return __cmd_report();
1070 }