// SPDX-License-Identifier: GPL-2.0
#include <sys/types.h>
#include "linux/hash.h"
#include "sane_ctype.h"
#include <symbol/kallsyms.h>
static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);
static void dsos__init(struct dsos *dsos)
{
	INIT_LIST_HEAD(&dsos->head);
	pthread_rwlock_init(&dsos->lock, NULL);
}
int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	memset(machine, 0, sizeof(*machine));
	map_groups__init(&machine->kmaps, machine);
	RB_CLEAR_NODE(&machine->rb_node);
	dsos__init(&machine->dsos);

	machine->threads = RB_ROOT;
	pthread_rwlock_init(&machine->threads_lock, NULL);
	machine->nr_threads = 0;
	INIT_LIST_HEAD(&machine->dead_threads);
	machine->last_match = NULL;

	machine->vdso_info = NULL;

	machine->pid = pid;

	machine->id_hdr_size = 0;
	machine->kptr_restrict_warned = false;
	machine->comm_exec = false;
	machine->kernel_start = 0;

	memset(machine->vmlinux_maps, 0, sizeof(machine->vmlinux_maps));

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, -1,
								pid);
		char comm[64];

		if (thread == NULL)
			return -ENOMEM;

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
		thread__put(thread);
	}

	machine->current_tid = NULL;

	return 0;
}
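/*
 * Illustrative usage sketch (not part of the original file): callers either
 * pair machine__init() with machine__exit() on an embedded struct, or use
 * the allocating wrappers below:
 *
 *	struct machine host;
 *
 *	if (machine__init(&host, "", HOST_KERNEL_ID) == 0) {
 *		... use &host ...
 *		machine__exit(&host);
 *	}
 *
 * machine__new_host()/machine__delete() wrap the same pair around
 * malloc()/free().
 */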
struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		machine__init(machine, "", HOST_KERNEL_ID);

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}
struct machine *machine__new_kallsyms(void)
{
	struct machine *machine = machine__new_host();
	/*
	 * FIXME:
	 * 1) MAP__FUNCTION will go away when we stop loading separate maps for
	 *    functions and data objects.
	 * 2) We should switch to machine__load_kallsyms(), i.e. not explicitly
	 *    ask for not using the kcore parsing code, once this one is fixed
	 *    to create a map per module.
	 */
	if (machine && __machine__load_kallsyms(machine, "/proc/kallsyms", MAP__FUNCTION, true) <= 0) {
		machine__delete(machine);
		machine = NULL;
	}

	return machine;
}
static void dsos__purge(struct dsos *dsos)
{
	struct dso *pos, *n;

	pthread_rwlock_wrlock(&dsos->lock);

	list_for_each_entry_safe(pos, n, &dsos->head, node) {
		RB_CLEAR_NODE(&pos->rb_node);
		list_del_init(&pos->node);
		dso__put(pos);
	}

	pthread_rwlock_unlock(&dsos->lock);
}
static void dsos__exit(struct dsos *dsos)
{
	dsos__purge(dsos);
	pthread_rwlock_destroy(&dsos->lock);
}
void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd;

	pthread_rwlock_wrlock(&machine->threads_lock);
	nd = rb_first(&machine->threads);
	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		nd = rb_next(nd);
		__machine__remove_thread(machine, t, false);
	}
	pthread_rwlock_unlock(&machine->threads_lock);
}
void machine__exit(struct machine *machine)
{
	machine__destroy_kernel_maps(machine);
	map_groups__exit(&machine->kmaps);
	dsos__exit(&machine->dsos);
	machine__exit_vdso(machine);
	zfree(&machine->root_dir);
	zfree(&machine->current_tid);
	pthread_rwlock_destroy(&machine->threads_lock);
}
void machine__delete(struct machine *machine)
{
	if (machine) {
		machine__exit(machine);
		free(machine);
	}
}
void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT;
}
void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
	/* XXX exit guest */
}
struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color(&machine->rb_node, &machines->guests);

	return machine;
}
void machines__set_comm_exec(struct machines *machines, bool comm_exec)
{
	struct rb_node *nd;

	machines->host.comm_exec = comm_exec;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->comm_exec = comm_exec;
	}
}
struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}
struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(NULL, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}
void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		process(pos, data);
	}
}
char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
{
	if (machine__is_host(machine))
		snprintf(bf, size, "[%s]", "kernel.kallsyms");
	else if (machine__is_default_guest(machine))
		snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
	else {
		snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms",
			 machine->pid);
	}

	return bf;
}
void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}
}
static void machine__update_thread_pid(struct machine *machine,
				       struct thread *th, pid_t pid)
{
	struct thread *leader;

	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
		return;

	th->pid_ = pid;

	if (th->pid_ == th->tid)
		return;

	leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
	if (!leader)
		goto out_err;

	if (!leader->mg)
		leader->mg = map_groups__new(machine);

	if (!leader->mg)
		goto out_err;

	if (th->mg == leader->mg)
		goto out_put;

	if (th->mg) {
		/*
		 * Maps are created from MMAP events which provide the pid and
		 * tid. Consequently there never should be any maps on a thread
		 * with an unknown pid. Just print an error if there are.
		 */
		if (!map_groups__empty(th->mg))
			pr_err("Discarding thread maps for %d:%d\n",
			       th->pid_, th->tid);
		map_groups__put(th->mg);
	}

	th->mg = map_groups__get(leader->mg);
out_put:
	thread__put(leader);
	return;
out_err:
	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
	goto out_put;
}
/*
 * Caller must eventually drop thread->refcnt returned with a successful
 * lookup/new thread inserted.
 */
static struct thread *____machine__findnew_thread(struct machine *machine,
						  pid_t pid, pid_t tid,
						  bool create)
{
	struct rb_node **p = &machine->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - TID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	th = machine->last_match;
	if (th != NULL) {
		if (th->tid == tid) {
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		machine->last_match = NULL;
	}

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			machine->last_match = th;
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &machine->threads);

		/*
		 * We have to initialize map_groups separately
		 * after rb tree is updated.
		 *
		 * The reason is that we call machine__findnew_thread
		 * within thread__init_map_groups to find the thread
		 * leader and that would screw up the rb tree.
		 */
		if (thread__init_map_groups(th, machine)) {
			rb_erase_init(&th->rb_node, &machine->threads);
			RB_CLEAR_NODE(&th->rb_node);
			thread__put(th);
			return NULL;
		}
		/*
		 * It is now in the rbtree, get a ref
		 */
		thread__get(th);
		machine->last_match = th;
		++machine->nr_threads;
	}

	return th;
}
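/*
 * Illustrative sketch: TID lookups typically arrive in runs, which is what
 * the last_match front-end cache above exploits:
 *
 *	struct thread *t1 = machine__findnew_thread(machine, pid, tid);
 *	struct thread *t2 = machine__findnew_thread(machine, pid, tid);
 *
 *	// t2 == t1; the second call is served from machine->last_match
 *	// without walking the rbtree. Both references must be dropped:
 *	thread__put(t2);
 *	thread__put(t1);
 */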
struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
{
	return ____machine__findnew_thread(machine, pid, tid, true);
}
struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	struct thread *th;

	pthread_rwlock_wrlock(&machine->threads_lock);
	th = __machine__findnew_thread(machine, pid, tid);
	pthread_rwlock_unlock(&machine->threads_lock);
	return th;
}
struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	struct thread *th;

	pthread_rwlock_rdlock(&machine->threads_lock);
	th = ____machine__findnew_thread(machine, pid, tid, false);
	pthread_rwlock_unlock(&machine->threads_lock);
	return th;
}
struct comm *machine__thread_exec_comm(struct machine *machine,
				       struct thread *thread)
{
	if (machine->comm_exec)
		return thread__exec_comm(thread);

	return thread__comm(thread);
}
int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);
	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
	int err = 0;

	if (exec)
		machine->comm_exec = true;

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL ||
	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}
int machine__process_namespaces_event(struct machine *machine __maybe_unused,
				      union perf_event *event,
				      struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->namespaces.pid,
							event->namespaces.tid);
	int err = 0;

	WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
		  "\nWARNING: kernel seems to support more namespaces than perf"
		  " tool.\nTry updating the perf tool..\n\n");

	WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
		  "\nWARNING: perf tool seems to support more namespaces than"
		  " the kernel.\nTry updating the kernel..\n\n");

	if (dump_trace)
		perf_event__fprintf_namespaces(event, stdout);

	if (thread == NULL ||
	    thread__set_namespaces(thread, sample->time, &event->namespaces)) {
		dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}
int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}
int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
					union perf_event *event, struct perf_sample *sample)
{
	dump_printf(": id:%" PRIu64 ": lost samples :%" PRIu64 "\n",
		    sample->id, event->lost_samples.lost);
	return 0;
}
static struct dso *machine__findnew_module_dso(struct machine *machine,
					       struct kmod_path *m,
					       const char *filename)
{
	struct dso *dso;

	pthread_rwlock_wrlock(&machine->dsos.lock);

	dso = __dsos__find(&machine->dsos, m->name, true);
	if (!dso) {
		dso = __dsos__addnew(&machine->dsos, m->name);
		if (dso == NULL)
			goto out_unlock;

		dso__set_module_info(dso, m, machine);
		dso__set_long_name(dso, strdup(filename), true);
	}

	dso__get(dso);
out_unlock:
	pthread_rwlock_unlock(&machine->dsos.lock);
	return dso;
}
int machine__process_aux_event(struct machine *machine __maybe_unused,
			       union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_aux(event, stdout);
	return 0;
}

int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
					union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_itrace_start(event, stdout);
	return 0;
}

int machine__process_switch_event(struct machine *machine __maybe_unused,
				  union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_switch(event, stdout);
	return 0;
}
static void dso__adjust_kmod_long_name(struct dso *dso, const char *filename)
{
	const char *dup_filename;

	if (!filename || !dso || !dso->long_name)
		return;
	if (dso->long_name[0] != '[')
		return;
	if (!strchr(filename, '/'))
		return;

	dup_filename = strdup(filename);
	if (!dup_filename)
		return;

	dso__set_long_name(dso, dup_filename, true);
}
struct map *machine__findnew_module_map(struct machine *machine, u64 start,
					const char *filename)
{
	struct map *map = NULL;
	struct dso *dso = NULL;
	struct kmod_path m;

	if (kmod_path__parse_name(&m, filename))
		return NULL;

	map = map_groups__find_by_name(&machine->kmaps, MAP__FUNCTION,
				       m.name);
	if (map) {
		/*
		 * If the map's dso is an offline module, give dso__load()
		 * a chance to find the file path of that module by fixing
		 * long_name.
		 */
		dso__adjust_kmod_long_name(map->dso, filename);
		goto out;
	}

	dso = machine__findnew_module_dso(machine, &m, filename);
	if (dso == NULL)
		goto out;

	map = map__new2(start, dso, MAP__FUNCTION);
	if (map == NULL)
		goto out;

	map_groups__insert(&machine->kmaps, map);

	/* Put the map here because map_groups__insert already got it */
	map__put(map);
out:
	/* put the dso here, corresponding to machine__findnew_module_dso */
	dso__put(dso);
	free(m.name);
	return map;
}
size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += __dsos__fprintf(&pos->dsos.head, fp);
	}

	return ret;
}
size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}

	return ret;
}
size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine__kernel_map(machine)->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];
		if (dso__build_id_filename(kdso, filename, sizeof(filename),
					   false))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}
size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	size_t ret;
	struct rb_node *nd;

	pthread_rwlock_rdlock(&machine->threads_lock);

	ret = fprintf(fp, "Threads: %u\n", machine->nr_threads);

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	pthread_rwlock_unlock(&machine->threads_lock);

	return ret;
}
static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = NULL;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		vmlinux_name = symbol_conf.vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = DSO__NAME_KALLSYMS;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[kernel]", DSO_TYPE_KERNEL);
	} else {
		char bf[PATH_MAX];

		if (machine__is_default_guest(machine))
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = machine__mmap_name(machine, bf,
							  sizeof(bf));

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[guest.kernel]",
						 DSO_TYPE_GUEST_KERNEL);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}
struct process_args {
	u64 start;
};
static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
					   size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}
const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
/* Figure out the start address of kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 */
static int machine__get_running_kernel_start(struct machine *machine,
					     const char **symbol_name, u64 *start)
{
	char filename[PATH_MAX];
	int i, err = -1;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		err = kallsyms__get_function_start(filename, name, &addr);
		if (!err)
			break;
	}

	if (err)
		return -1;

	if (symbol_name)
		*symbol_name = name;

	*start = addr;
	return 0;
}
/* Kernel-space maps for symbols that are outside the main kernel map and module maps */
struct extra_kernel_map {
	u64 start;
	u64 end;
	u64 pgoff;
};

static int machine__create_extra_kernel_map(struct machine *machine,
					    struct dso *kernel,
					    struct extra_kernel_map *xm)
{
	struct kmap *kmap;
	struct map *map;

	map = map__new2(xm->start, kernel, MAP__FUNCTION);
	if (!map)
		return -1;

	map->end   = xm->end;
	map->pgoff = xm->pgoff;

	kmap = map__kmap(map);

	kmap->kmaps = &machine->kmaps;

	map_groups__insert(&machine->kmaps, map);

	pr_debug2("Added extra kernel map %" PRIx64 "-%" PRIx64 "\n",
		  map->start, map->end);

	map__put(map);

	return 0;
}
static u64 find_entry_trampoline(struct dso *dso)
{
	/* Duplicates are removed so lookup all aliases */
	const char *syms[] = {
		"_entry_trampoline",
		"__entry_trampoline_start",
		"entry_SYSCALL_64_trampoline",
	};
	struct symbol *sym = dso__first_symbol(dso, MAP__FUNCTION);
	unsigned int i;

	for (; sym; sym = dso__next_symbol(sym)) {
		if (sym->binding != STB_GLOBAL)
			continue;
		for (i = 0; i < ARRAY_SIZE(syms); i++) {
			if (!strcmp(sym->name, syms[i]))
				return sym->start;
		}
	}

	return 0;
}
/*
 * These values can be used for kernels that do not have symbols for the entry
 * trampolines in kallsyms.
 */
#define X86_64_CPU_ENTRY_AREA_PER_CPU	0xfffffe0000000000ULL
#define X86_64_CPU_ENTRY_AREA_SIZE	0x2c000
#define X86_64_ENTRY_TRAMPOLINE		0x6000
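/*
 * Worked example (illustrative): with the constants above, the trampoline
 * for CPU n lives at
 *
 *	va(n) = X86_64_CPU_ENTRY_AREA_PER_CPU
 *	      + n * X86_64_CPU_ENTRY_AREA_SIZE
 *	      + X86_64_ENTRY_TRAMPOLINE
 *
 * so va(0) = 0xfffffe0000006000 and va(1) = 0xfffffe0000032000, one page
 * per CPU, as mapped by the function below.
 */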
/* Map x86_64 PTI entry trampolines */
int machine__map_x86_64_entry_trampolines(struct machine *machine,
					  struct dso *kernel)
{
	u64 pgoff = find_entry_trampoline(kernel);
	int nr_cpus_avail, cpu;

	if (!pgoff)
		return 0;

	nr_cpus_avail = machine__nr_cpus_avail(machine);

	/* Add a 1 page map for each CPU's entry trampoline */
	for (cpu = 0; cpu < nr_cpus_avail; cpu++) {
		u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU +
			 cpu * X86_64_CPU_ENTRY_AREA_SIZE +
			 X86_64_ENTRY_TRAMPOLINE;
		struct extra_kernel_map xm = {
			.start = va,
			.end   = va + page_size,
			.pgoff = pgoff,
		};

		if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0)
			return -1;
	}

	return 0;
}
int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	int type;
	u64 start = 0;

	if (machine__get_running_kernel_start(machine, NULL, &start))
		return -1;

	/* If the kernel map is being renewed, destroy the previous one */
	machine__destroy_kernel_maps(machine);

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;
		struct map *map;

		machine->vmlinux_maps[type] = map__new2(start, kernel, type);
		if (machine->vmlinux_maps[type] == NULL)
			return -1;

		machine->vmlinux_maps[type]->map_ip =
			machine->vmlinux_maps[type]->unmap_ip =
				identity__map_ip;
		map = __machine__kernel_map(machine, type);
		kmap = map__kmap(map);
		if (!kmap)
			return -1;

		kmap->kmaps = &machine->kmaps;
		map_groups__insert(&machine->kmaps, map);
	}

	return 0;
}
void machine__destroy_kernel_maps(struct machine *machine)
{
	int type;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;
		struct map *map = __machine__kernel_map(machine, type);

		if (map == NULL)
			continue;

		kmap = map__kmap(map);
		map_groups__remove(&machine->kmaps, map);
		if (kmap && kmap->ref_reloc_sym) {
			/*
			 * ref_reloc_sym is shared among all maps, so free just
			 * on one of them.
			 */
			if (type == MAP__FUNCTION) {
				zfree((char **)&kmap->ref_reloc_sym->name);
				zfree(&kmap->ref_reloc_sym);
			} else
				kmap->ref_reloc_sym = NULL;
		}

		map__put(machine->vmlinux_maps[type]);
		machine->vmlinux_maps[type] = NULL;
	}
}
int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		free(namelist);
	}

	return ret;
}
void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}
int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}
int __machine__load_kallsyms(struct machine *machine, const char *filename,
			     enum map_type type, bool no_kcore)
{
	struct map *map = machine__kernel_map(machine);
	int ret = __dso__load_kallsyms(map->dso, filename, map, no_kcore);

	if (ret > 0) {
		dso__set_loaded(map->dso, type);
		/*
		 * Since /proc/kallsyms will have multiple sections for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		__map_groups__fixup_end(&machine->kmaps, type);
	}

	return ret;
}
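/*
 * Illustrative example: /proc/kallsyms lines look like
 *
 *	ffffffff81000000 T _text
 *	ffffffffc0001000 t some_fn	[some_module]
 *
 * (addresses and names hypothetical), so the kernel proper and each module
 * contribute separate runs of symbols; that is why the section ends are
 * fixed up after a successful load above.
 */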
int machine__load_kallsyms(struct machine *machine, const char *filename,
			   enum map_type type)
{
	return __machine__load_kallsyms(machine, filename, type, false);
}
int machine__load_vmlinux_path(struct machine *machine, enum map_type type)
{
	struct map *map = machine__kernel_map(machine);
	int ret = dso__load_vmlinux_path(map->dso, map);

	if (ret > 0)
		dso__set_loaded(map->dso, type);

	return ret;
}
static void map_groups__fixup_end(struct map_groups *mg)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; ++i)
		__map_groups__fixup_end(mg, i);
}
static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	version[0] = '\0';
	tmp = fgets(version, sizeof(version), file);
	fclose(file);

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}
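/*
 * Illustrative example: given a /proc/version line such as
 *
 *	Linux version 4.14.0-perf (gcc version 7.2.0) #1 SMP ...
 *
 * (version string hypothetical), get_kernel_version() returns the strdup'd
 * "4.14.0-perf", which machine__set_modules_path() below turns into
 * "<root_dir>/lib/modules/4.14.0-perf".
 */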
static bool is_kmod_dso(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
}
static int map_groups__set_module_path(struct map_groups *mg, const char *path,
				       struct kmod_path *m)
{
	char *long_name;
	struct map *map;

	map = map_groups__find_by_name(mg, MAP__FUNCTION, m->name);
	if (map == NULL)
		return 0;

	long_name = strdup(path);
	if (long_name == NULL)
		return -ENOMEM;

	dso__set_long_name(map->dso, long_name, true);
	dso__kernel_module_get_build_id(map->dso, "");

	/*
	 * The full name could reveal kmod compression, so
	 * we need to update the symtab_type if needed.
	 */
	if (m->comp && is_kmod_dso(map->dso))
		map->dso->symtab_type++;

	return 0;
}
static int map_groups__set_modules_path_dir(struct map_groups *mg,
					    const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = map_groups__set_modules_path_dir(mg, path,
							       depth + 1);
			if (ret < 0)
				goto out;
		} else {
			struct kmod_path m;

			ret = kmod_path__parse_name(&m, dent->d_name);
			if (ret)
				goto out;

			if (m.kmod)
				ret = map_groups__set_module_path(mg, path, &m);

			free(m.name);

			if (ret)
				goto out;
		}
	}

out:
	closedir(dir);
	return ret;
}
*machine
)
1220 char modules_path
[PATH_MAX
];
1222 version
= get_kernel_version(machine
->root_dir
);
1226 snprintf(modules_path
, sizeof(modules_path
), "%s/lib/modules/%s",
1227 machine
->root_dir
, version
);
1230 return map_groups__set_modules_path_dir(&machine
->kmaps
, modules_path
, 0);
int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
				       const char *name __maybe_unused)
{
	return 0;
}
static int machine__create_module(void *arg, const char *name, u64 start,
				  u64 size)
{
	struct machine *machine = arg;
	struct map *map;

	if (arch__fix_module_text_start(&start, name) < 0)
		return -1;

	map = machine__findnew_module_map(machine, start, name);
	if (map == NULL)
		return -1;
	map->end = start + size;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}
static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}
int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name = NULL;
	u64 addr = 0;
	int ret;

	if (kernel == NULL)
		return -1;

	ret = __machine__create_kernel_maps(machine, kernel);
	dso__put(kernel);
	if (ret < 0)
		return -1;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	/*
	 * Now that we have all the maps created, just set the ->end of them:
	 */
	map_groups__fixup_end(&machine->kmaps);

	if (!machine__get_running_kernel_start(machine, &name, &addr)) {
		if (name &&
		    maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name, addr)) {
			machine__destroy_kernel_maps(machine);
			return -1;
		}
	}

	return 0;
}
static void machine__set_kernel_mmap_len(struct machine *machine,
					 union perf_event *event)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; i++) {
		machine->vmlinux_maps[i]->start = event->mmap.start;
		machine->vmlinux_maps[i]->end   = (event->mmap.start +
						   event->mmap.len);
		/*
		 * Be a bit paranoid here, some perf.data file came with
		 * a zero sized synthesized MMAP event for the kernel.
		 */
		if (machine->vmlinux_maps[i]->end == 0)
			machine->vmlinux_maps[i]->end = ~0ULL;
	}
}
static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->dsos.head, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}
static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
		map = machine__findnew_module_map(machine, event->mmap.start,
						  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
					   strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = NULL;
		struct dso *dso;

		pthread_rwlock_rdlock(&machine->dsos.lock);

		list_for_each_entry(dso, &machine->dsos.head, node) {
			/*
			 * The cpumode passed to is_kernel_module is not the
			 * cpumode of *this* event. If we insist on passing
			 * the correct cpumode to is_kernel_module, we should
			 * record the cpumode when adding this dso to the
			 * linked list.
			 *
			 * However we don't really need to pass the correct
			 * cpumode. We know the correct cpumode must be kernel
			 * mode (if not, we should not link it onto kernel_dsos
			 * list).
			 *
			 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
			 * is_kernel_module() treats it as a kernel cpumode.
			 */
			if (!dso->kernel ||
			    is_kernel_module(dso->long_name,
					     PERF_RECORD_MISC_CPUMODE_UNKNOWN))
				continue;

			kernel = dso;
			break;
		}

		pthread_rwlock_unlock(&machine->dsos.lock);

		if (kernel == NULL)
			kernel = machine__findnew_dso(machine, kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0) {
			dso__put(kernel);
			goto out_problem;
		}

		if (strstr(kernel->long_name, "vmlinux"))
			dso__set_short_name(kernel, "[kernel.vmlinux]", false);

		machine__set_kernel_mmap_len(machine, event);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine__kernel_map(machine));
		}
	}
	return 0;
out_problem:
	return -1;
}
int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					 event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff,
		       event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       event->mmap2.prot,
		       event->mmap2.flags,
		       event->mmap2.filename, type, thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}
int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       0, 0, 0, 0, 0, 0,
		       event->mmap.filename,
		       type, thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}
static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
{
	if (machine->last_match == th)
		machine->last_match = NULL;

	BUG_ON(refcount_read(&th->refcnt) == 0);
	if (lock)
		pthread_rwlock_wrlock(&machine->threads_lock);
	rb_erase_init(&th->rb_node, &machine->threads);
	RB_CLEAR_NODE(&th->rb_node);
	--machine->nr_threads;
	/*
	 * Move it first to the dead_threads list, then drop the reference,
	 * if this is the last reference, then the thread__delete destructor
	 * will be called and we will remove it from the dead_threads list.
	 */
	list_add_tail(&th->node, &machine->dead_threads);
	if (lock)
		pthread_rwlock_unlock(&machine->threads_lock);
	thread__put(th);
}
void machine__remove_thread(struct machine *machine, struct thread *th)
{
	return __machine__remove_thread(machine, th, true);
}
int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);
	int err = 0;

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	/*
	 * There may be an existing thread that is not actually the parent,
	 * either because we are processing events out of order, or because the
	 * (fork) event that would have removed the thread was lost. Assume the
	 * latter case and continue on as best we can.
	 */
	if (parent->pid_ != (pid_t)event->fork.ppid) {
		dump_printf("removing erroneous parent thread %d/%d\n",
			    parent->pid_, parent->tid);
		machine__remove_thread(machine, parent);
		thread__put(parent);
		parent = machine__findnew_thread(machine, event->fork.ppid,
						 event->fork.ptid);
	}

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL) {
		machine__remove_thread(machine, thread);
		thread__put(thread);
	}

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		err = -1;
	}
	thread__put(thread);
	thread__put(parent);

	return err;
}
int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL) {
		thread__exited(thread);
		thread__put(thread);
	}

	return 0;
}
int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_NAMESPACES:
		ret = machine__process_namespaces_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	case PERF_RECORD_AUX:
		ret = machine__process_aux_event(machine, event); break;
	case PERF_RECORD_ITRACE_START:
		ret = machine__process_itrace_start_event(machine, event); break;
	case PERF_RECORD_LOST_SAMPLES:
		ret = machine__process_lost_samples_event(machine, event, sample); break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret = machine__process_switch_event(machine, event); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}
*sym
, regex_t
*regex
)
1710 if (!regexec(regex
, sym
->name
, 0, NULL
, 0))
static void ip__resolve_ams(struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest, hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we have to try consecutively until we find a match
	 * or else, the symbol is unknown
	 */
	thread__find_cpumode_addr_location(thread, MAP__FUNCTION, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}
static void ip__resolve_data(struct thread *thread,
			     u8 m, struct addr_map_symbol *ams,
			     u64 addr, u64 phys_addr)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_addr_location(thread, m, MAP__VARIABLE, addr, &al);
	if (al.map == NULL) {
		/*
		 * some shared data regions have execute bit set which puts
		 * their mapping in the MAP__FUNCTION type array.
		 * Check there as a fallback option before dropping the sample.
		 */
		thread__find_addr_location(thread, m, MAP__FUNCTION, addr, &al);
	}

	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
	ams->phys_addr = phys_addr;
}
struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = zalloc(sizeof(*mi));

	if (!mi)
		return NULL;

	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
	ip__resolve_data(al->thread, al->cpumode, &mi->daddr,
			 sample->addr, sample->phys_addr);
	mi->data_src.val = sample->data_src;

	return mi;
}
*thread
,
1785 struct callchain_cursor
*cursor
,
1786 struct symbol
**parent
,
1787 struct addr_location
*root_al
,
1791 struct branch_flags
*flags
,
1792 struct iterations
*iter
,
1795 struct addr_location al
;
1796 int nr_loop_iter
= 0;
1797 u64 iter_cycles
= 0;
1802 thread__find_cpumode_addr_location(thread
, MAP__FUNCTION
,
1805 if (ip
>= PERF_CONTEXT_MAX
) {
1807 case PERF_CONTEXT_HV
:
1808 *cpumode
= PERF_RECORD_MISC_HYPERVISOR
;
1810 case PERF_CONTEXT_KERNEL
:
1811 *cpumode
= PERF_RECORD_MISC_KERNEL
;
1813 case PERF_CONTEXT_USER
:
1814 *cpumode
= PERF_RECORD_MISC_USER
;
1817 pr_debug("invalid callchain context: "
1818 "%"PRId64
"\n", (s64
) ip
);
1820 * It seems the callchain is corrupted.
1823 callchain_cursor_reset(cursor
);
1828 thread__find_addr_location(thread
, *cpumode
, MAP__FUNCTION
,
1832 if (al
.sym
!= NULL
) {
1833 if (perf_hpp_list
.parent
&& !*parent
&&
1834 symbol__match_regex(al
.sym
, &parent_regex
))
1836 else if (have_ignore_callees
&& root_al
&&
1837 symbol__match_regex(al
.sym
, &ignore_callees_regex
)) {
1838 /* Treat this symbol as the root,
1839 forgetting its callees. */
1841 callchain_cursor_reset(cursor
);
1845 if (symbol_conf
.hide_unresolved
&& al
.sym
== NULL
)
1849 nr_loop_iter
= iter
->nr_loop_iter
;
1850 iter_cycles
= iter
->cycles
;
1853 return callchain_cursor_append(cursor
, al
.addr
, al
.map
, al
.sym
,
1854 branch
, flags
, nr_loop_iter
,
1855 iter_cycles
, branch_from
);
struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}

	return bi;
}
*iter
,
1877 struct branch_entry
*be
, int nr
)
1881 iter
->nr_loop_iter
= nr
;
1884 for (i
= 0; i
< nr
; i
++)
1885 iter
->cycles
+= be
[i
].flags
.cycles
;
#define CHASHSZ 127
#define CHASHBITS 7
#define NO_ENTRY 0xff

#define PERF_MAX_BRANCH_DEPTH 127
/* Remove loops. */
static int remove_loops(struct branch_entry *l, int nr,
			struct iterations *iter)
{
	int i, j, off;
	unsigned char chash[CHASHSZ];

	memset(chash, NO_ENTRY, sizeof(chash));

	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);

	for (i = 0; i < nr; i++) {
		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;

		/* no collision handling for now */
		if (chash[h] == NO_ENTRY) {
			chash[h] = i;
		} else if (l[chash[h]].from == l[i].from) {
			bool is_loop = true;
			/* check if it is a real loop */
			off = 0;
			for (j = chash[h]; j < i && i + off < nr; j++, off++)
				if (l[j].from != l[i + off].from) {
					is_loop = false;
					break;
				}
			if (is_loop) {
				j = nr - (i + off);
				if (j > 0) {
					save_iterations(iter + i + off,
							l + i, off);

					memmove(iter + i, iter + i + off,
						j * sizeof(*iter));

					memmove(l + i, l + i + off,
						j * sizeof(*l));
				}

				nr = i + j;
			}
		}
	}
	return nr;
}
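/*
 * Worked example (illustrative): for a branch history whose "from"
 * addresses form
 *
 *	A B C A B C A B C D
 *
 * the chash table above maps each "from" to its first occurrence, the
 * repeated A..C body is recognized as a loop, and the entries collapse to
 *
 *	A B C D
 *
 * with save_iterations() recording the iteration count and accumulated
 * cycles in the corresponding iter[] slot.
 */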
/*
 * Resolve LBR callstack chain sample
 * Return:
 * 1 on success: got LBR callchain information
 * 0 no available LBR callchain information, should try fp
 * negative error code on other errors.
 */
static int resolve_lbr_callchain_sample(struct thread *thread,
					struct callchain_cursor *cursor,
					struct perf_sample *sample,
					struct symbol **parent,
					struct addr_location *root_al,
					int max_stack)
{
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr), i;
	u8 cpumode = PERF_RECORD_MISC_USER;
	u64 ip, branch_from = 0;

	for (i = 0; i < chain_nr; i++) {
		if (chain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	/* LBR only affects the user callchain */
	if (i != chain_nr) {
		struct branch_stack *lbr_stack = sample->branch_stack;
		int lbr_nr = lbr_stack->nr, j, k;
		bool branch;
		struct branch_flags *flags;
		/*
		 * LBR callstack can only get user call chain.
		 * The mix_chain_nr is kernel call chain
		 * number plus LBR user call chain number.
		 * i is kernel call chain number,
		 * 1 is PERF_CONTEXT_USER,
		 * lbr_nr + 1 is the user call chain number.
		 * For details, please refer to the comments
		 * in callchain__printf
		 */
		int mix_chain_nr = i + 1 + lbr_nr + 1;

		for (j = 0; j < mix_chain_nr; j++) {
			int err;

			branch = false;
			flags = NULL;

			if (callchain_param.order == ORDER_CALLEE) {
				if (j < i + 1)
					ip = chain->ips[j];
				else if (j > i + 1) {
					k = j - i - 2;
					ip = lbr_stack->entries[k].from;
					branch = true;
					flags = &lbr_stack->entries[k].flags;
				} else {
					ip = lbr_stack->entries[0].to;
					branch = true;
					flags = &lbr_stack->entries[0].flags;
					branch_from =
						lbr_stack->entries[0].from;
				}
			} else {
				if (j < lbr_nr) {
					k = lbr_nr - j - 1;
					ip = lbr_stack->entries[k].from;
					branch = true;
					flags = &lbr_stack->entries[k].flags;
				} else if (j > lbr_nr)
					ip = chain->ips[i + 1 - (j - lbr_nr)];
				else {
					ip = lbr_stack->entries[0].to;
					branch = true;
					flags = &lbr_stack->entries[0].flags;
					branch_from =
						lbr_stack->entries[0].from;
				}
			}

			err = add_callchain_ip(thread, cursor, parent,
					       root_al, &cpumode, ip,
					       branch, flags, NULL,
					       branch_from);
			if (err)
				return (err < 0) ? err : 0;
		}
		return 1;
	}

	return 0;
}
static int thread__resolve_callchain_sample(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    struct symbol **parent,
					    struct addr_location *root_al,
					    int max_stack)
{
	struct branch_stack *branch = sample->branch_stack;
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = 0;
	u8 cpumode = PERF_RECORD_MISC_USER;
	int i, j, err, nr_entries;
	int skip_idx = -1;
	int first_call = 0;

	if (chain)
		chain_nr = chain->nr;

	if (perf_evsel__has_branch_callstack(evsel)) {
		err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
						   root_al, max_stack);
		if (err)
			return (err < 0) ? err : 0;
	}

	/*
	 * Based on DWARF debug information, some architectures skip
	 * a callchain entry saved by the kernel.
	 */
	skip_idx = arch_skip_callchain_idx(thread, chain);

	/*
	 * Add branches to call stack for easier browsing. This gives
	 * more context for a sample than just the callers.
	 *
	 * This uses individual histograms of paths compared to the
	 * aggregated histograms the normal LBR mode uses.
	 *
	 * Limitations for now:
	 * - No extra filters
	 * - No annotations (should annotate somehow)
	 */
	if (branch && callchain_param.branch_callstack) {
		int nr = min(max_stack, (int)branch->nr);
		struct branch_entry be[nr];
		struct iterations iter[nr];

		if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted branch chain. skipping...\n");
			goto check_calls;
		}

		for (i = 0; i < nr; i++) {
			if (callchain_param.order == ORDER_CALLEE) {
				be[i] = branch->entries[i];

				if (chain == NULL)
					continue;

				/*
				 * Check for overlap into the callchain.
				 * The return address is one off compared to
				 * the branch entry. To adjust for this
				 * assume the calling instruction is not longer
				 * than 8 bytes.
				 */
				if (i == skip_idx ||
				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
					first_call++;
				else if (be[i].from < chain->ips[first_call] &&
					 be[i].from >= chain->ips[first_call] - 8)
					first_call++;
			} else
				be[i] = branch->entries[branch->nr - i - 1];
		}

		memset(iter, 0, sizeof(struct iterations) * nr);
		nr = remove_loops(be, nr, iter);

		for (i = 0; i < nr; i++) {
			err = add_callchain_ip(thread, cursor, parent,
					       root_al, NULL, be[i].to,
					       true, &be[i].flags,
					       NULL, be[i].from);
			if (!err)
				err = add_callchain_ip(thread, cursor, parent, root_al,
						       NULL, be[i].from,
						       true, &be[i].flags,
						       &iter[i], 0);
			if (err == -EINVAL)
				break;
			if (err)
				return err;
		}
		chain_nr -= nr;
	}

check_calls:
	for (i = first_call, nr_entries = 0;
	     i < chain_nr && nr_entries < max_stack; i++) {
		u64 ip;

		if (callchain_param.order == ORDER_CALLEE)
			j = i;
		else
			j = chain->nr - i - 1;

#ifdef HAVE_SKIP_CALLCHAIN_IDX
		if (j == skip_idx)
			continue;
#endif
		ip = chain->ips[j];

		if (ip < PERF_CONTEXT_MAX)
			++nr_entries;

		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, ip,
				       false, NULL, NULL, 0);

		if (err)
			return (err < 0) ? err : 0;
	}

	return 0;
}
static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	struct callchain_cursor *cursor = arg;

	if (symbol_conf.hide_unresolved && entry->sym == NULL)
		return 0;
	return callchain_cursor_append(cursor, entry->ip,
				       entry->map, entry->sym,
				       false, NULL, 0, 0, 0);
}
static int thread__resolve_callchain_unwind(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    int max_stack)
{
	/* Can we do dwarf post unwind? */
	if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	return unwind__get_entries(unwind_entry, cursor,
				   thread, sample, max_stack);
}
int thread__resolve_callchain(struct thread *thread,
			      struct callchain_cursor *cursor,
			      struct perf_evsel *evsel,
			      struct perf_sample *sample,
			      struct symbol **parent,
			      struct addr_location *root_al,
			      int max_stack)
{
	int ret = 0;

	callchain_cursor_reset(&callchain_cursor);

	if (callchain_param.order == ORDER_CALLEE) {
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack);
	} else {
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack);
	}

	return ret;
}
int machine__for_each_thread(struct machine *machine,
			     int (*fn)(struct thread *thread, void *p),
			     void *priv)
{
	struct rb_node *nd;
	struct thread *thread;
	int rc = 0;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		thread = rb_entry(nd, struct thread, rb_node);
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}

	list_for_each_entry(thread, &machine->dead_threads, node) {
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}
int machines__for_each_thread(struct machines *machines,
			      int (*fn)(struct thread *thread, void *p),
			      void *priv)
{
	struct rb_node *nd;
	int rc = 0;

	rc = machine__for_each_thread(&machines->host, fn, priv);
	if (rc != 0)
		return rc;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		rc = machine__for_each_thread(machine, fn, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}
int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
				  struct target *target, struct thread_map *threads,
				  perf_event__handler_t process, bool data_mmap,
				  unsigned int proc_map_timeout)
{
	if (target__has_task(target))
		return perf_event__synthesize_thread_map(tool, threads, process, machine,
							 data_mmap, proc_map_timeout);
	else if (target__has_cpu(target))
		return perf_event__synthesize_threads(tool, process, machine,
						      data_mmap, proc_map_timeout);
	/* command specified */
	return 0;
}
pid_t machine__get_current_tid(struct machine *machine, int cpu)
{
	if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid)
		return -1;

	return machine->current_tid[cpu];
}
int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
			     pid_t tid)
{
	struct thread *thread;

	if (cpu < 0)
		return -EINVAL;

	if (!machine->current_tid) {
		int i;

		machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t));
		if (!machine->current_tid)
			return -ENOMEM;
		for (i = 0; i < MAX_NR_CPUS; i++)
			machine->current_tid[i] = -1;
	}

	if (cpu >= MAX_NR_CPUS) {
		pr_err("Requested CPU %d too large. ", cpu);
		pr_err("Consider raising MAX_NR_CPUS\n");
		return -EINVAL;
	}

	machine->current_tid[cpu] = tid;

	thread = machine__findnew_thread(machine, pid, tid);
	if (!thread)
		return -ENOMEM;

	thread->cpu = cpu;
	thread__put(thread);

	return 0;
}
/*
 * Compares the raw arch string. N.B. see instead perf_env__arch() if a
 * normalized arch is needed.
 */
bool machine__is(struct machine *machine, const char *arch)
{
	return machine && !strcmp(perf_env__raw_arch(machine->env), arch);
}
int machine__nr_cpus_avail(struct machine *machine)
{
	return machine ? perf_env__nr_cpus_avail(machine->env) : 0;
}
int machine__get_kernel_start(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine);
	int err = 0;

	/*
	 * The only addresses above 2^63 are kernel addresses of a 64-bit
	 * kernel. Note that addresses are unsigned so that on a 32-bit system
	 * all addresses including kernel addresses are less than 2^32. In
	 * that case (32-bit system), if the kernel mapping is unknown, all
	 * addresses will be assumed to be in user space - see
	 * machine__kernel_ip().
	 */
	machine->kernel_start = 1ULL << 63;
	if (map) {
		err = map__load(map);
		/*
		 * On x86_64, PTI entry trampolines are less than the
		 * start of kernel text, but still above 2^63. So leave
		 * kernel_start = 1ULL << 63 for x86_64.
		 */
		if (!err && !machine__is(machine, "x86_64"))
			machine->kernel_start = map->start;
	}
	return err;
}
struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
{
	return dsos__findnew(&machine->dsos, filename);
}
char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
{
	struct machine *machine = vmachine;
	struct map *map;
	struct symbol *sym = map_groups__find_symbol(&machine->kmaps,
						     MAP__FUNCTION, *addrp, &map);

	if (sym == NULL)
		return NULL;

	*modp = __map__is_kmodule(map) ? (char *)map->dso->short_name : NULL;
	*addrp = map->unmap_ip(map, sym->start);
	return sym->name;
}