#include <linux/types.h>
#include "event.h"
#include "debug.h"
#include "session.h"
#include "sort.h"
#include "string.h"
#include "strlist.h"
#include "thread.h"
#include "thread_map.h"

static const char *perf_event__names[] = {
	[0]					= "TOTAL",
	[PERF_RECORD_MMAP]			= "MMAP",
	[PERF_RECORD_LOST]			= "LOST",
	[PERF_RECORD_COMM]			= "COMM",
	[PERF_RECORD_EXIT]			= "EXIT",
	[PERF_RECORD_THROTTLE]			= "THROTTLE",
	[PERF_RECORD_UNTHROTTLE]		= "UNTHROTTLE",
	[PERF_RECORD_FORK]			= "FORK",
	[PERF_RECORD_READ]			= "READ",
	[PERF_RECORD_SAMPLE]			= "SAMPLE",
	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]		= "BUILD_ID",
	[PERF_RECORD_FINISHED_ROUND]		= "FINISHED_ROUND",
};

const char *perf_event__name(unsigned int id)
{
	if (id >= ARRAY_SIZE(perf_event__names))
		return "INVALID";
	if (!perf_event__names[id])
		return "UNKNOWN";
	return perf_event__names[id];
}

static struct perf_sample synth_sample = {
	.pid	   = -1,
	.tid	   = -1,
	.time	   = -1,
	.stream_id = -1,
	.cpu	   = -1,
	.period	   = 1,
};

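/*
 * Synthesize a PERF_RECORD_COMM event for @pid from /proc/<pid>/status.
 * When @full is set, one event is emitted per thread found under
 * /proc/<pid>/task.  Returns the tgid read from /proc, or 0 if the task
 * exited before we could read it.
 */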
static pid_t perf_event__synthesize_comm(union perf_event *event, pid_t pid,
					 int full, perf_event__handler_t process,
					 struct perf_session *session)
{
	char filename[PATH_MAX];
	char bf[BUFSIZ];
	FILE *fp;
	size_t size = 0;
	DIR *tasks;
	struct dirent dirent, *next;
	pid_t tgid = 0;

	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
out_race:
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	memset(&event->comm, 0, sizeof(event->comm));

	while (!event->comm.comm[0] || !event->comm.pid) {
		if (fgets(bf, sizeof(bf), fp) == NULL) {
			pr_warning("couldn't get COMM and pgid, malformed %s\n", filename);
			goto out;
		}

		if (memcmp(bf, "Name:", 5) == 0) {
			char *name = bf + 5;
			while (*name && isspace(*name))
				++name;
			size = strlen(name) - 1;
			memcpy(event->comm.comm, name, size++);
		} else if (memcmp(bf, "Tgid:", 5) == 0) {
			char *tgids = bf + 5;
			while (*tgids && isspace(*tgids))
				++tgids;
			tgid = event->comm.pid = atoi(tgids);
		}
	}

	event->comm.header.type = PERF_RECORD_COMM;
	size = ALIGN(size, sizeof(u64));
	memset(event->comm.comm + size, 0, session->id_hdr_size);
	event->comm.header.size = (sizeof(event->comm) -
				   (sizeof(event->comm.comm) - size) +
				   session->id_hdr_size);
	if (!full) {
		event->comm.tid = pid;

		process(event, &synth_sample, session);
		goto out;
	}

	snprintf(filename, sizeof(filename), "/proc/%d/task", pid);

	tasks = opendir(filename);
	if (tasks == NULL)
		goto out_race;

	while (!readdir_r(tasks, &dirent, &next) && next) {
		char *end;
		pid = strtol(dirent.d_name, &end, 10);
		if (*end)
			continue;

		event->comm.tid = pid;

		process(event, &synth_sample, session);
	}

	closedir(tasks);
out:
	fclose(fp);

	return tgid;
}

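/*
 * Synthesize PERF_RECORD_MMAP events for the executable mappings listed
 * in /proc/<pid>/maps, so samples from already running tasks can be
 * resolved against the right DSOs.
 */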
static int perf_event__synthesize_mmap_events(union perf_event *event,
					      pid_t pid, pid_t tgid,
					      perf_event__handler_t process,
					      struct perf_session *session)
{
	char filename[PATH_MAX];
	FILE *fp;

	snprintf(filename, sizeof(filename), "/proc/%d/maps", pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;
	/*
	 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
	 */
	event->header.misc = PERF_RECORD_MISC_USER;

	while (1) {
		char bf[BUFSIZ], *pbf = bf;
		int n;
		size_t size;
		if (fgets(bf, sizeof(bf), fp) == NULL)
			break;

		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
		n = hex2u64(pbf, &event->mmap.start);
		if (n < 0)
			continue;
		pbf += n + 1;
		n = hex2u64(pbf, &event->mmap.len);
		if (n < 0)
			continue;
		pbf += n + 3;
		if (*pbf == 'x') { /* vm_exec */
			char *execname = strchr(bf, '/');

			/* Catch VDSO */
			if (execname == NULL)
				execname = strstr(bf, "[vdso]");

			if (execname == NULL)
				continue;

			pbf += 3;
			n = hex2u64(pbf, &event->mmap.pgoff);

			size = strlen(execname);
			execname[size - 1] = '\0'; /* Remove \n */
			memcpy(event->mmap.filename, execname, size);
			size = ALIGN(size, sizeof(u64));
			event->mmap.len -= event->mmap.start;
			event->mmap.header.size = (sizeof(event->mmap) -
					(sizeof(event->mmap.filename) - size));
			memset(event->mmap.filename + size, 0, session->id_hdr_size);
			event->mmap.header.size += session->id_hdr_size;
			event->mmap.pid = tgid;
			event->mmap.tid = pid;

			process(event, &synth_sample, session);
		}
	}

	fclose(fp);
	return 0;
}

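/*
 * Synthesize one PERF_RECORD_MMAP event per kernel module map already
 * present in machine->kmaps (the kernel map itself is skipped).
 */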
int perf_event__synthesize_modules(perf_event__handler_t process,
				   struct perf_session *session,
				   struct machine *machine)
{
	struct rb_node *nd;
	struct map_groups *kmaps = &machine->kmaps;
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  session->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	else
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

	for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
	     nd; nd = rb_next(nd)) {
		size_t size;
		struct map *pos = rb_entry(nd, struct map, rb_node);

		if (pos->dso->kernel)
			continue;

		size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
					   (sizeof(event->mmap.filename) - size));
		memset(event->mmap.filename + size, 0, session->id_hdr_size);
		event->mmap.header.size += session->id_hdr_size;
		event->mmap.start = pos->start;
		event->mmap.len   = pos->end - pos->start;
		event->mmap.pid   = machine->pid;

		memcpy(event->mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);
		process(event, &synth_sample, session);
	}

	free(event);
	return 0;
}

static int __event__synthesize_thread(union perf_event *comm_event,
				      union perf_event *mmap_event,
				      pid_t pid, perf_event__handler_t process,
				      struct perf_session *session)
{
	pid_t tgid = perf_event__synthesize_comm(comm_event, pid, 1, process,
						 session);
	if (tgid == -1)
		return -1;
	return perf_event__synthesize_mmap_events(mmap_event, pid, tgid,
						  process, session);
}

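/*
 * Synthesize COMM and MMAP events for every thread in @threads, e.g. when
 * attaching to already running tasks.
 */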
int perf_event__synthesize_thread_map(struct thread_map *threads,
				      perf_event__handler_t process,
				      struct perf_session *session)
{
	union perf_event *comm_event, *mmap_event;
	int err = -1, thread;

	comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap) + session->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
					       threads->map[thread],
					       process, session)) {
			err = -1;
			break;
		}
	}
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

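/*
 * System-wide variant: walk the numerical directories under /proc and
 * synthesize COMM and MMAP events for each task found there.
 */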
int perf_event__synthesize_threads(perf_event__handler_t process,
				   struct perf_session *session)
{
	DIR *proc;
	struct dirent dirent, *next;
	union perf_event *comm_event, *mmap_event;
	int err = -1;

	comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap) + session->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	proc = opendir("/proc");
	if (proc == NULL)
		goto out_free_mmap;

	while (!readdir_r(proc, &dirent, &next) && next) {
		char *end;
		pid_t pid = strtol(dirent.d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;

		__event__synthesize_thread(comm_event, mmap_event, pid,
					   process, session);
	}

	closedir(proc);
	err = 0;
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

struct process_symbol_args {
	const char *name;
	u64	   start;
};

static int find_symbol_cb(void *arg, const char *name, char type,
			  u64 start, u64 end __used)
{
	struct process_symbol_args *args = arg;

	/*
	 * Must be a function or at least an alias, as in PARISC64, where "_text" is
	 * an 'A' to the same address as "_stext".
	 */
	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
	      type == 'A') || strcmp(name, args->name))
		return 0;

	args->start = start;
	return 1;
}

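/*
 * Synthesize the PERF_RECORD_MMAP event describing the kernel text map,
 * using the address of @symbol_name looked up in kallsyms as the
 * reference relocation offset (pgoff).
 */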
int perf_event__synthesize_kernel_mmap(perf_event__handler_t process,
				       struct perf_session *session,
				       struct machine *machine,
				       const char *symbol_name)
{
	size_t size;
	const char *filename, *mmap_name;
	char path[PATH_MAX];
	char name_buff[PATH_MAX];
	struct map *map;
	int err;
	/*
	 * We should get this from /sys/kernel/sections/.text, but till that is
	 * available use this, and after it is use this as a fallback for older
	 * kernels.
	 */
	struct process_symbol_args args = { .name = symbol_name, };
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  session->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for the kernel\n");
		return -1;
	}

	mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
	if (machine__is_host(machine)) {
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
		event->header.misc = PERF_RECORD_MISC_KERNEL;
		filename = "/proc/kallsyms";
	} else {
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
		if (machine__is_default_guest(machine))
			filename = (char *) symbol_conf.default_guest_kallsyms;
		else {
			sprintf(path, "%s/proc/kallsyms", machine->root_dir);
			filename = path;
		}
	}

	if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0) {
		free(event);
		return -ENOENT;
	}

	map = machine->vmlinux_maps[MAP__FUNCTION];
	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
			"%s%s", mmap_name, symbol_name) + 1;
	size = ALIGN(size, sizeof(u64));
	event->mmap.header.type = PERF_RECORD_MMAP;
	event->mmap.header.size = (sizeof(event->mmap) -
			(sizeof(event->mmap.filename) - size) + session->id_hdr_size);
	event->mmap.pgoff = args.start;
	event->mmap.start = map->start;
	event->mmap.len   = map->end - event->mmap.start;
	event->mmap.pid   = machine->pid;

	err = process(event, &synth_sample, session);
	free(event);

	return err;
}

static void thread__comm_adjust(struct thread *self, struct hists *hists)
{
	char *comm = self->comm;

	if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    (!symbol_conf.comm_list ||
	     strlist__has_entry(symbol_conf.comm_list, comm))) {
		u16 slen = strlen(comm);

		if (hists__new_col_len(hists, HISTC_COMM, slen))
			hists__set_col_len(hists, HISTC_THREAD, slen + 6);
	}
}

static int thread__set_comm_adjust(struct thread *self, const char *comm,
				   struct hists *hists)
{
	int ret = thread__set_comm(self, comm);

	if (ret)
		return ret;

	thread__comm_adjust(self, hists);

	return 0;
}

int perf_event__process_comm(union perf_event *event,
			     struct perf_sample *sample __used,
			     struct perf_session *session)
{
	struct thread *thread = perf_session__findnew(session, event->comm.tid);

	dump_printf(": %s:%d\n", event->comm.comm, event->comm.tid);

	if (thread == NULL || thread__set_comm_adjust(thread, event->comm.comm,
						      &session->hists)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		return -1;
	}

	return 0;
}

int perf_event__process_lost(union perf_event *event,
			     struct perf_sample *sample __used,
			     struct perf_session *session)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	session->hists.stats.total_lost += event->lost.lost;
	return 0;
}

static void perf_event__set_kernel_mmap_len(union perf_event *event,
					    struct map **maps)
{
	maps[MAP__FUNCTION]->start = event->mmap.start;
	maps[MAP__FUNCTION]->end   = event->mmap.start + event->mmap.len;
	/*
	 * Be a bit paranoid here, some perf.data file came with
	 * a zero sized synthesized MMAP event for the kernel.
	 */
	if (maps[MAP__FUNCTION]->end == 0)
		maps[MAP__FUNCTION]->end = ~0ULL;
}

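/*
 * Handle an MMAP event for kernel space: either a module map, which gets
 * a "[name]" style short name, or the kernel map itself, for which the
 * kernel maps are created and the ref_reloc_sym is set up.
 */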
static int perf_event__process_kernel_mmap(union perf_event *event,
					   struct perf_session *session)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	struct machine *machine;
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	machine = perf_session__findnew_machine(session, event->mmap.pid);
	if (!machine) {
		pr_err("Can't find id %d's machine\n", event->mmap.pid);
		goto out_problem;
	}

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix)) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {

		char short_module_name[1024];
		char *name, *dot;

		if (event->mmap.filename[0] == '/') {
			name = strrchr(event->mmap.filename, '/');
			if (name == NULL)
				goto out_problem;

			++name; /* skip / */
			dot = strrchr(name, '.');
			if (dot == NULL)
				goto out_problem;
			snprintf(short_module_name, sizeof(short_module_name),
				 "[%.*s]", (int)(dot - name), name);
			strxfrchar(short_module_name, '-', '_');
		} else
			strcpy(short_module_name, event->mmap.filename);

		map = machine__new_module(machine, event->mmap.start,
					  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		name = strdup(short_module_name);
		if (name == NULL)
			goto out_problem;

		map->dso->short_name = name;
		map->dso->sname_alloc = 1;
		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
					   strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
						     kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0)
			goto out_problem;

		perf_event__set_kernel_mmap_len(event, machine->vmlinux_maps);
		perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 event->mmap.pgoff);
		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
				  NULL);
		}
	}
	return 0;
out_problem:
	return -1;
}

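/*
 * Handle PERF_RECORD_MMAP: kernel and guest kernel mappings are handed to
 * perf_event__process_kernel_mmap(), user space mappings are inserted
 * into the owning thread's map group.
 */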
int perf_event__process_mmap(union perf_event *event,
			     struct perf_sample *sample __used,
			     struct perf_session *session)
{
	struct machine *machine;
	struct thread *thread;
	struct map *map;
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	int ret = 0;

	dump_printf(" %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n",
		    event->mmap.pid, event->mmap.tid, event->mmap.start,
		    event->mmap.len, event->mmap.pgoff, event->mmap.filename);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = perf_event__process_kernel_mmap(event, session);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	machine = perf_session__find_host_machine(session);
	if (machine == NULL)
		goto out_problem;
	thread = perf_session__findnew(session, event->mmap.pid);
	if (thread == NULL)
		goto out_problem;
	map = map__new(&machine->user_dsos, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       event->mmap.pid, event->mmap.filename,
		       MAP__FUNCTION);
	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

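/*
 * Handle PERF_RECORD_FORK and PERF_RECORD_EXIT: on exit the thread is
 * removed from the session, on fork the child inherits the parent's maps.
 */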
int perf_event__process_task(union perf_event *event,
			     struct perf_sample *sample __used,
			     struct perf_session *session)
{
	struct thread *thread = perf_session__findnew(session, event->fork.tid);
	struct thread *parent = perf_session__findnew(session, event->fork.ptid);

	dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid,
		    event->fork.ppid, event->fork.ptid);

	if (event->header.type == PERF_RECORD_EXIT) {
		perf_session__remove_thread(session, thread);
		return 0;
	}

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		return -1;
	}

	return 0;
}

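/*
 * Default dispatcher for non-sample events, for tools that do not install
 * per-event handlers.
 */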
int perf_event__process(union perf_event *event, struct perf_sample *sample,
			struct perf_session *session)
{
	switch (event->header.type) {
	case PERF_RECORD_COMM:
		perf_event__process_comm(event, sample, session);
		break;
	case PERF_RECORD_MMAP:
		perf_event__process_mmap(event, sample, session);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		perf_event__process_task(event, sample, session);
		break;
	case PERF_RECORD_LOST:
		perf_event__process_lost(event, sample, session);
		break;
	default:
		break;
	}

	return 0;
}

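/*
 * Resolve @addr to a map, choosing the map group according to the
 * cpumode (host/guest, kernel/user); samples that cannot be attributed
 * under the current perf_host/perf_guest settings are marked filtered.
 */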
void thread__find_addr_map(struct thread *self,
			   struct perf_session *session, u8 cpumode,
			   enum map_type type, pid_t pid, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = &self->mg;
	struct machine *machine = NULL;

	al->thread = self;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = false;

	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		machine = perf_session__find_host_machine(session);
		if (machine == NULL) {
			al->map = NULL;
			return;
		}
		mg = &machine->kmaps;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
		machine = perf_session__find_host_machine(session);
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		machine = perf_session__find_machine(session, pid);
		if (machine == NULL) {
			al->map = NULL;
			return;
		}
		mg = &machine->kmaps;
	} else {
		/*
		 * 'u' means guest os user space.
		 * TODO: We don't support guest user space. Might support late.
		 */
		if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest)
			al->level = 'u';
		else
			al->level = 'H';
		al->map = NULL;

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
		     cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
		    !perf_guest)
			al->filtered = true;
		if ((cpumode == PERF_RECORD_MISC_USER ||
		     cpumode == PERF_RECORD_MISC_KERNEL) &&
		    !perf_host)
			al->filtered = true;

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now lets use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if ((long long)al->addr < 0 &&
		    cpumode == PERF_RECORD_MISC_KERNEL &&
		    machine && mg != &machine->kmaps) {
			mg = &machine->kmaps;
			goto try_again;
		}
	} else
		al->addr = al->map->map_ip(al->map, al->addr);
}

void thread__find_addr_location(struct thread *self,
				struct perf_session *session, u8 cpumode,
				enum map_type type, pid_t pid, u64 addr,
				struct addr_location *al,
				symbol_filter_t filter)
{
	thread__find_addr_map(self, session, cpumode, type, pid, addr, al);
	if (al->map != NULL)
		al->sym = map__find_symbol(al->map, al->addr, filter);
	else
		al->sym = NULL;
}

static void dso__calc_col_width(struct dso *self, struct hists *hists)
{
	if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    (!symbol_conf.dso_list ||
	     strlist__has_entry(symbol_conf.dso_list, self->name))) {
		u16 slen = dso__name_len(self);
		hists__new_col_len(hists, HISTC_DSO, slen);
	}

	self->slen_calculated = 1;
}

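/*
 * Resolve a sample to its thread, map and symbol, honouring the
 * comm/dso/sym filter lists in symbol_conf; filtered samples have
 * al->filtered set instead of being dropped.
 */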
int perf_event__preprocess_sample(const union perf_event *event,
				  struct perf_session *session,
				  struct addr_location *al,
				  struct perf_sample *sample,
				  symbol_filter_t filter)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread = perf_session__findnew(session, event->ip.pid);

	if (thread == NULL)
		return -1;

	if (symbol_conf.comm_list &&
	    !strlist__has_entry(symbol_conf.comm_list, thread->comm))
		goto out_filtered;

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
	/*
	 * Have we already created the kernel maps for the host machine?
	 *
	 * This should have happened earlier, when we processed the kernel MMAP
	 * events, but for older perf.data files there was no such thing, so do
	 * it now.
	 */
	if (cpumode == PERF_RECORD_MISC_KERNEL &&
	    session->host_machine.vmlinux_maps[MAP__FUNCTION] == NULL)
		machine__create_kernel_maps(&session->host_machine);

	thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
			      event->ip.pid, event->ip.ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");
	al->sym = NULL;
	al->cpu = sample->cpu;

	if (al->map) {
		if (symbol_conf.dso_list &&
		    (!al->map || !al->map->dso ||
		     !(strlist__has_entry(symbol_conf.dso_list,
					  al->map->dso->short_name) ||
		       (al->map->dso->short_name != al->map->dso->long_name &&
			strlist__has_entry(symbol_conf.dso_list,
					   al->map->dso->long_name)))))
			goto out_filtered;
		/*
		 * We have to do this here as we may have a dso with no symbol
		 * hit that has a name longer than the ones with symbols
		 * sampled.
		 */
		if (!sort_dso.elide && !al->map->dso->slen_calculated)
			dso__calc_col_width(al->map->dso, &session->hists);

		al->sym = map__find_symbol(al->map, al->addr, filter);
	} else {
		const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

		if (hists__col_len(&session->hists, HISTC_DSO) < unresolved_col_width &&
		    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
		    !symbol_conf.dso_list)
			hists__set_col_len(&session->hists, HISTC_DSO,
					   unresolved_col_width);
	}

	if (symbol_conf.sym_list && al->sym &&
	    !strlist__has_entry(symbol_conf.sym_list, al->sym->name))
		goto out_filtered;

	return 0;

out_filtered:
	al->filtered = true;
	return 0;
}