1 #include <linux/types.h>
10 static const char *event__name
[] = {
12 [PERF_RECORD_MMAP
] = "MMAP",
13 [PERF_RECORD_LOST
] = "LOST",
14 [PERF_RECORD_COMM
] = "COMM",
15 [PERF_RECORD_EXIT
] = "EXIT",
16 [PERF_RECORD_THROTTLE
] = "THROTTLE",
17 [PERF_RECORD_UNTHROTTLE
] = "UNTHROTTLE",
18 [PERF_RECORD_FORK
] = "FORK",
19 [PERF_RECORD_READ
] = "READ",
20 [PERF_RECORD_SAMPLE
] = "SAMPLE",
21 [PERF_RECORD_HEADER_ATTR
] = "ATTR",
22 [PERF_RECORD_HEADER_EVENT_TYPE
] = "EVENT_TYPE",
23 [PERF_RECORD_HEADER_TRACING_DATA
] = "TRACING_DATA",
24 [PERF_RECORD_HEADER_BUILD_ID
] = "BUILD_ID",
25 [PERF_RECORD_FINISHED_ROUND
] = "FINISHED_ROUND",
28 const char *event__get_event_name(unsigned int id
)
30 if (id
>= ARRAY_SIZE(event__name
))
34 return event__name
[id
];
37 static struct sample_data synth_sample
= {
46 static pid_t
event__synthesize_comm(event_t
*event
, pid_t pid
, int full
,
47 event__handler_t process
,
48 struct perf_session
*session
)
50 char filename
[PATH_MAX
];
55 struct dirent dirent
, *next
;
58 snprintf(filename
, sizeof(filename
), "/proc/%d/status", pid
);
60 fp
= fopen(filename
, "r");
64 * We raced with a task exiting - just return:
66 pr_debug("couldn't open %s\n", filename
);
70 memset(&event
->comm
, 0, sizeof(event
->comm
));
72 while (!event
->comm
.comm
[0] || !event
->comm
.pid
) {
73 if (fgets(bf
, sizeof(bf
), fp
) == NULL
) {
74 pr_warning("couldn't get COMM and pgid, malformed %s\n", filename
);
78 if (memcmp(bf
, "Name:", 5) == 0) {
80 while (*name
&& isspace(*name
))
82 size
= strlen(name
) - 1;
83 memcpy(event
->comm
.comm
, name
, size
++);
84 } else if (memcmp(bf
, "Tgid:", 5) == 0) {
86 while (*tgids
&& isspace(*tgids
))
88 tgid
= event
->comm
.pid
= atoi(tgids
);
92 event
->comm
.header
.type
= PERF_RECORD_COMM
;
93 size
= ALIGN(size
, sizeof(u64
));
94 memset(event
->comm
.comm
+ size
, 0, session
->id_hdr_size
);
95 event
->comm
.header
.size
= (sizeof(event
->comm
) -
96 (sizeof(event
->comm
.comm
) - size
) +
97 session
->id_hdr_size
);
99 event
->comm
.tid
= pid
;
101 process(event
, &synth_sample
, session
);
105 snprintf(filename
, sizeof(filename
), "/proc/%d/task", pid
);
107 tasks
= opendir(filename
);
111 while (!readdir_r(tasks
, &dirent
, &next
) && next
) {
113 pid
= strtol(dirent
.d_name
, &end
, 10);
117 event
->comm
.tid
= pid
;
119 process(event
, &synth_sample
, session
);
129 static int event__synthesize_mmap_events(event_t
*event
, pid_t pid
, pid_t tgid
,
130 event__handler_t process
,
131 struct perf_session
*session
)
133 char filename
[PATH_MAX
];
136 snprintf(filename
, sizeof(filename
), "/proc/%d/maps", pid
);
138 fp
= fopen(filename
, "r");
141 * We raced with a task exiting - just return:
143 pr_debug("couldn't open %s\n", filename
);
147 event
->header
.type
= PERF_RECORD_MMAP
;
149 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
151 event
->header
.misc
= PERF_RECORD_MISC_USER
;
154 char bf
[BUFSIZ
], *pbf
= bf
;
157 if (fgets(bf
, sizeof(bf
), fp
) == NULL
)
160 /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
161 n
= hex2u64(pbf
, &event
->mmap
.start
);
165 n
= hex2u64(pbf
, &event
->mmap
.len
);
169 if (*pbf
== 'x') { /* vm_exec */
170 char *execname
= strchr(bf
, '/');
173 if (execname
== NULL
)
174 execname
= strstr(bf
, "[vdso]");
176 if (execname
== NULL
)
180 n
= hex2u64(pbf
, &event
->mmap
.pgoff
);
182 size
= strlen(execname
);
183 execname
[size
- 1] = '\0'; /* Remove \n */
184 memcpy(event
->mmap
.filename
, execname
, size
);
185 size
= ALIGN(size
, sizeof(u64
));
186 event
->mmap
.len
-= event
->mmap
.start
;
187 event
->mmap
.header
.size
= (sizeof(event
->mmap
) -
188 (sizeof(event
->mmap
.filename
) - size
));
189 memset(event
->mmap
.filename
+ size
, 0, session
->id_hdr_size
);
190 event
->mmap
.header
.size
+= session
->id_hdr_size
;
191 event
->mmap
.pid
= tgid
;
192 event
->mmap
.tid
= pid
;
194 process(event
, &synth_sample
, session
);
202 int event__synthesize_modules(event__handler_t process
,
203 struct perf_session
*session
,
204 struct machine
*machine
)
207 struct map_groups
*kmaps
= &machine
->kmaps
;
208 event_t
*event
= zalloc(sizeof(event
->mmap
) + session
->id_hdr_size
);
211 pr_debug("Not enough memory synthesizing mmap event "
212 "for kernel modules\n");
216 event
->header
.type
= PERF_RECORD_MMAP
;
219 * kernel uses 0 for user space maps, see kernel/perf_event.c
222 if (machine__is_host(machine
))
223 event
->header
.misc
= PERF_RECORD_MISC_KERNEL
;
225 event
->header
.misc
= PERF_RECORD_MISC_GUEST_KERNEL
;
227 for (nd
= rb_first(&kmaps
->maps
[MAP__FUNCTION
]);
228 nd
; nd
= rb_next(nd
)) {
230 struct map
*pos
= rb_entry(nd
, struct map
, rb_node
);
232 if (pos
->dso
->kernel
)
235 size
= ALIGN(pos
->dso
->long_name_len
+ 1, sizeof(u64
));
236 event
->mmap
.header
.type
= PERF_RECORD_MMAP
;
237 event
->mmap
.header
.size
= (sizeof(event
->mmap
) -
238 (sizeof(event
->mmap
.filename
) - size
));
239 memset(event
->mmap
.filename
+ size
, 0, session
->id_hdr_size
);
240 event
->mmap
.header
.size
+= session
->id_hdr_size
;
241 event
->mmap
.start
= pos
->start
;
242 event
->mmap
.len
= pos
->end
- pos
->start
;
243 event
->mmap
.pid
= machine
->pid
;
245 memcpy(event
->mmap
.filename
, pos
->dso
->long_name
,
246 pos
->dso
->long_name_len
+ 1);
247 process(event
, &synth_sample
, session
);
254 static int __event__synthesize_thread(event_t
*comm_event
, event_t
*mmap_event
,
255 pid_t pid
, event__handler_t process
,
256 struct perf_session
*session
)
258 pid_t tgid
= event__synthesize_comm(comm_event
, pid
, 1, process
,
262 return event__synthesize_mmap_events(mmap_event
, pid
, tgid
,
266 int event__synthesize_thread(pid_t pid
, event__handler_t process
,
267 struct perf_session
*session
)
269 event_t
*comm_event
, *mmap_event
;
272 comm_event
= malloc(sizeof(comm_event
->comm
) + session
->id_hdr_size
);
273 if (comm_event
== NULL
)
276 mmap_event
= malloc(sizeof(mmap_event
->mmap
) + session
->id_hdr_size
);
277 if (mmap_event
== NULL
)
280 err
= __event__synthesize_thread(comm_event
, mmap_event
, pid
,
289 int event__synthesize_threads(event__handler_t process
,
290 struct perf_session
*session
)
293 struct dirent dirent
, *next
;
294 event_t
*comm_event
, *mmap_event
;
297 comm_event
= malloc(sizeof(comm_event
->comm
) + session
->id_hdr_size
);
298 if (comm_event
== NULL
)
301 mmap_event
= malloc(sizeof(mmap_event
->mmap
) + session
->id_hdr_size
);
302 if (mmap_event
== NULL
)
305 proc
= opendir("/proc");
309 while (!readdir_r(proc
, &dirent
, &next
) && next
) {
311 pid_t pid
= strtol(dirent
.d_name
, &end
, 10);
313 if (*end
) /* only interested in proper numerical dirents */
316 __event__synthesize_thread(comm_event
, mmap_event
, pid
,
330 struct process_symbol_args
{
335 static int find_symbol_cb(void *arg
, const char *name
, char type
,
336 u64 start
, u64 end __used
)
338 struct process_symbol_args
*args
= arg
;
341 * Must be a function or at least an alias, as in PARISC64, where "_text" is
342 * an 'A' to the same address as "_stext".
344 if (!(symbol_type__is_a(type
, MAP__FUNCTION
) ||
345 type
== 'A') || strcmp(name
, args
->name
))
352 int event__synthesize_kernel_mmap(event__handler_t process
,
353 struct perf_session
*session
,
354 struct machine
*machine
,
355 const char *symbol_name
)
358 const char *filename
, *mmap_name
;
360 char name_buff
[PATH_MAX
];
364 * We should get this from /sys/kernel/sections/.text, but till that is
365 * available use this, and after it is use this as a fallback for older
368 struct process_symbol_args args
= { .name
= symbol_name
, };
369 event_t
*event
= zalloc(sizeof(event
->mmap
) + session
->id_hdr_size
);
372 pr_debug("Not enough memory synthesizing mmap event "
373 "for kernel modules\n");
377 mmap_name
= machine__mmap_name(machine
, name_buff
, sizeof(name_buff
));
378 if (machine__is_host(machine
)) {
380 * kernel uses PERF_RECORD_MISC_USER for user space maps,
381 * see kernel/perf_event.c __perf_event_mmap
383 event
->header
.misc
= PERF_RECORD_MISC_KERNEL
;
384 filename
= "/proc/kallsyms";
386 event
->header
.misc
= PERF_RECORD_MISC_GUEST_KERNEL
;
387 if (machine__is_default_guest(machine
))
388 filename
= (char *) symbol_conf
.default_guest_kallsyms
;
390 sprintf(path
, "%s/proc/kallsyms", machine
->root_dir
);
395 if (kallsyms__parse(filename
, &args
, find_symbol_cb
) <= 0)
398 map
= machine
->vmlinux_maps
[MAP__FUNCTION
];
399 size
= snprintf(event
->mmap
.filename
, sizeof(event
->mmap
.filename
),
400 "%s%s", mmap_name
, symbol_name
) + 1;
401 size
= ALIGN(size
, sizeof(u64
));
402 event
->mmap
.header
.type
= PERF_RECORD_MMAP
;
403 event
->mmap
.header
.size
= (sizeof(event
->mmap
) -
404 (sizeof(event
->mmap
.filename
) - size
) + session
->id_hdr_size
);
405 event
->mmap
.pgoff
= args
.start
;
406 event
->mmap
.start
= map
->start
;
407 event
->mmap
.len
= map
->end
- event
->mmap
.start
;
408 event
->mmap
.pid
= machine
->pid
;
410 err
= process(event
, &synth_sample
, session
);
416 static void thread__comm_adjust(struct thread
*self
, struct hists
*hists
)
418 char *comm
= self
->comm
;
420 if (!symbol_conf
.col_width_list_str
&& !symbol_conf
.field_sep
&&
421 (!symbol_conf
.comm_list
||
422 strlist__has_entry(symbol_conf
.comm_list
, comm
))) {
423 u16 slen
= strlen(comm
);
425 if (hists__new_col_len(hists
, HISTC_COMM
, slen
))
426 hists__set_col_len(hists
, HISTC_THREAD
, slen
+ 6);
/*
 * Set the thread's comm and, on success, adjust histogram column
 * widths accordingly.  Propagates thread__set_comm()'s error code.
 */
static int thread__set_comm_adjust(struct thread *self, const char *comm,
				   struct hists *hists)
{
	int ret = thread__set_comm(self, comm);

	if (ret)
		return ret;

	thread__comm_adjust(self, hists);

	return 0;
}
443 int event__process_comm(event_t
*self
, struct sample_data
*sample __used
,
444 struct perf_session
*session
)
446 struct thread
*thread
= perf_session__findnew(session
, self
->comm
.tid
);
448 dump_printf(": %s:%d\n", self
->comm
.comm
, self
->comm
.tid
);
450 if (thread
== NULL
|| thread__set_comm_adjust(thread
, self
->comm
.comm
,
452 dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
459 int event__process_lost(event_t
*self
, struct sample_data
*sample __used
,
460 struct perf_session
*session
)
462 dump_printf(": id:%" PRIu64
": lost:%" PRIu64
"\n",
463 self
->lost
.id
, self
->lost
.lost
);
464 session
->hists
.stats
.total_lost
+= self
->lost
.lost
;
468 static void event_set_kernel_mmap_len(struct map
**maps
, event_t
*self
)
470 maps
[MAP__FUNCTION
]->start
= self
->mmap
.start
;
471 maps
[MAP__FUNCTION
]->end
= self
->mmap
.start
+ self
->mmap
.len
;
473 * Be a bit paranoid here, some perf.data file came with
474 * a zero sized synthesized MMAP event for the kernel.
476 if (maps
[MAP__FUNCTION
]->end
== 0)
477 maps
[MAP__FUNCTION
]->end
= ~0ULL;
480 static int event__process_kernel_mmap(event_t
*self
,
481 struct perf_session
*session
)
484 char kmmap_prefix
[PATH_MAX
];
485 struct machine
*machine
;
486 enum dso_kernel_type kernel_type
;
489 machine
= perf_session__findnew_machine(session
, self
->mmap
.pid
);
491 pr_err("Can't find id %d's machine\n", self
->mmap
.pid
);
495 machine__mmap_name(machine
, kmmap_prefix
, sizeof(kmmap_prefix
));
496 if (machine__is_host(machine
))
497 kernel_type
= DSO_TYPE_KERNEL
;
499 kernel_type
= DSO_TYPE_GUEST_KERNEL
;
501 is_kernel_mmap
= memcmp(self
->mmap
.filename
,
503 strlen(kmmap_prefix
)) == 0;
504 if (self
->mmap
.filename
[0] == '/' ||
505 (!is_kernel_mmap
&& self
->mmap
.filename
[0] == '[')) {
507 char short_module_name
[1024];
510 if (self
->mmap
.filename
[0] == '/') {
511 name
= strrchr(self
->mmap
.filename
, '/');
516 dot
= strrchr(name
, '.');
519 snprintf(short_module_name
, sizeof(short_module_name
),
520 "[%.*s]", (int)(dot
- name
), name
);
521 strxfrchar(short_module_name
, '-', '_');
523 strcpy(short_module_name
, self
->mmap
.filename
);
525 map
= machine__new_module(machine
, self
->mmap
.start
,
526 self
->mmap
.filename
);
530 name
= strdup(short_module_name
);
534 map
->dso
->short_name
= name
;
535 map
->dso
->sname_alloc
= 1;
536 map
->end
= map
->start
+ self
->mmap
.len
;
537 } else if (is_kernel_mmap
) {
538 const char *symbol_name
= (self
->mmap
.filename
+
539 strlen(kmmap_prefix
));
541 * Should be there already, from the build-id table in
544 struct dso
*kernel
= __dsos__findnew(&machine
->kernel_dsos
,
549 kernel
->kernel
= kernel_type
;
550 if (__machine__create_kernel_maps(machine
, kernel
) < 0)
553 event_set_kernel_mmap_len(machine
->vmlinux_maps
, self
);
554 perf_session__set_kallsyms_ref_reloc_sym(machine
->vmlinux_maps
,
557 if (machine__is_default_guest(machine
)) {
559 * preload dso of guest kernel and modules
561 dso__load(kernel
, machine
->vmlinux_maps
[MAP__FUNCTION
],
570 int event__process_mmap(event_t
*self
, struct sample_data
*sample __used
,
571 struct perf_session
*session
)
573 struct machine
*machine
;
574 struct thread
*thread
;
576 u8 cpumode
= self
->header
.misc
& PERF_RECORD_MISC_CPUMODE_MASK
;
579 dump_printf(" %d/%d: [%#" PRIx64
"(%#" PRIx64
") @ %#" PRIx64
"]: %s\n",
580 self
->mmap
.pid
, self
->mmap
.tid
, self
->mmap
.start
,
581 self
->mmap
.len
, self
->mmap
.pgoff
, self
->mmap
.filename
);
583 if (cpumode
== PERF_RECORD_MISC_GUEST_KERNEL
||
584 cpumode
== PERF_RECORD_MISC_KERNEL
) {
585 ret
= event__process_kernel_mmap(self
, session
);
591 machine
= perf_session__find_host_machine(session
);
594 thread
= perf_session__findnew(session
, self
->mmap
.pid
);
597 map
= map__new(&machine
->user_dsos
, self
->mmap
.start
,
598 self
->mmap
.len
, self
->mmap
.pgoff
,
599 self
->mmap
.pid
, self
->mmap
.filename
,
604 thread__insert_map(thread
, map
);
608 dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
612 int event__process_task(event_t
*self
, struct sample_data
*sample __used
,
613 struct perf_session
*session
)
615 struct thread
*thread
= perf_session__findnew(session
, self
->fork
.tid
);
616 struct thread
*parent
= perf_session__findnew(session
, self
->fork
.ptid
);
618 dump_printf("(%d:%d):(%d:%d)\n", self
->fork
.pid
, self
->fork
.tid
,
619 self
->fork
.ppid
, self
->fork
.ptid
);
621 if (self
->header
.type
== PERF_RECORD_EXIT
) {
622 perf_session__remove_thread(session
, thread
);
626 if (thread
== NULL
|| parent
== NULL
||
627 thread__fork(thread
, parent
) < 0) {
628 dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
635 int event__process(event_t
*event
, struct sample_data
*sample
,
636 struct perf_session
*session
)
638 switch (event
->header
.type
) {
639 case PERF_RECORD_COMM
:
640 event__process_comm(event
, sample
, session
);
642 case PERF_RECORD_MMAP
:
643 event__process_mmap(event
, sample
, session
);
645 case PERF_RECORD_FORK
:
646 case PERF_RECORD_EXIT
:
647 event__process_task(event
, sample
, session
);
656 void thread__find_addr_map(struct thread
*self
,
657 struct perf_session
*session
, u8 cpumode
,
658 enum map_type type
, pid_t pid
, u64 addr
,
659 struct addr_location
*al
)
661 struct map_groups
*mg
= &self
->mg
;
662 struct machine
*machine
= NULL
;
666 al
->cpumode
= cpumode
;
667 al
->filtered
= false;
669 if (cpumode
== PERF_RECORD_MISC_KERNEL
&& perf_host
) {
671 machine
= perf_session__find_host_machine(session
);
672 if (machine
== NULL
) {
676 mg
= &machine
->kmaps
;
677 } else if (cpumode
== PERF_RECORD_MISC_USER
&& perf_host
) {
679 machine
= perf_session__find_host_machine(session
);
680 } else if (cpumode
== PERF_RECORD_MISC_GUEST_KERNEL
&& perf_guest
) {
682 machine
= perf_session__find_machine(session
, pid
);
683 if (machine
== NULL
) {
687 mg
= &machine
->kmaps
;
690 * 'u' means guest os user space.
691 * TODO: We don't support guest user space. Might support late.
693 if (cpumode
== PERF_RECORD_MISC_GUEST_USER
&& perf_guest
)
699 if ((cpumode
== PERF_RECORD_MISC_GUEST_USER
||
700 cpumode
== PERF_RECORD_MISC_GUEST_KERNEL
) &&
703 if ((cpumode
== PERF_RECORD_MISC_USER
||
704 cpumode
== PERF_RECORD_MISC_KERNEL
) &&
711 al
->map
= map_groups__find(mg
, type
, al
->addr
);
712 if (al
->map
== NULL
) {
714 * If this is outside of all known maps, and is a negative
715 * address, try to look it up in the kernel dso, as it might be
716 * a vsyscall or vdso (which executes in user-mode).
718 * XXX This is nasty, we should have a symbol list in the
719 * "[vdso]" dso, but for now lets use the old trick of looking
720 * in the whole kernel symbol list.
722 if ((long long)al
->addr
< 0 &&
723 cpumode
== PERF_RECORD_MISC_KERNEL
&&
724 machine
&& mg
!= &machine
->kmaps
) {
725 mg
= &machine
->kmaps
;
729 al
->addr
= al
->map
->map_ip(al
->map
, al
->addr
);
732 void thread__find_addr_location(struct thread
*self
,
733 struct perf_session
*session
, u8 cpumode
,
734 enum map_type type
, pid_t pid
, u64 addr
,
735 struct addr_location
*al
,
736 symbol_filter_t filter
)
738 thread__find_addr_map(self
, session
, cpumode
, type
, pid
, addr
, al
);
740 al
->sym
= map__find_symbol(al
->map
, al
->addr
, filter
);
745 static void dso__calc_col_width(struct dso
*self
, struct hists
*hists
)
747 if (!symbol_conf
.col_width_list_str
&& !symbol_conf
.field_sep
&&
748 (!symbol_conf
.dso_list
||
749 strlist__has_entry(symbol_conf
.dso_list
, self
->name
))) {
750 u16 slen
= dso__name_len(self
);
751 hists__new_col_len(hists
, HISTC_DSO
, slen
);
754 self
->slen_calculated
= 1;
757 int event__preprocess_sample(const event_t
*self
, struct perf_session
*session
,
758 struct addr_location
*al
, struct sample_data
*data
,
759 symbol_filter_t filter
)
761 u8 cpumode
= self
->header
.misc
& PERF_RECORD_MISC_CPUMODE_MASK
;
762 struct thread
*thread
= perf_session__findnew(session
, self
->ip
.pid
);
767 if (symbol_conf
.comm_list
&&
768 !strlist__has_entry(symbol_conf
.comm_list
, thread
->comm
))
771 dump_printf(" ... thread: %s:%d\n", thread
->comm
, thread
->pid
);
773 * Have we already created the kernel maps for the host machine?
775 * This should have happened earlier, when we processed the kernel MMAP
776 * events, but for older perf.data files there was no such thing, so do
779 if (cpumode
== PERF_RECORD_MISC_KERNEL
&&
780 session
->host_machine
.vmlinux_maps
[MAP__FUNCTION
] == NULL
)
781 machine__create_kernel_maps(&session
->host_machine
);
783 thread__find_addr_map(thread
, session
, cpumode
, MAP__FUNCTION
,
784 self
->ip
.pid
, self
->ip
.ip
, al
);
785 dump_printf(" ...... dso: %s\n",
786 al
->map
? al
->map
->dso
->long_name
:
787 al
->level
== 'H' ? "[hypervisor]" : "<not found>");
792 if (symbol_conf
.dso_list
&&
793 (!al
->map
|| !al
->map
->dso
||
794 !(strlist__has_entry(symbol_conf
.dso_list
,
795 al
->map
->dso
->short_name
) ||
796 (al
->map
->dso
->short_name
!= al
->map
->dso
->long_name
&&
797 strlist__has_entry(symbol_conf
.dso_list
,
798 al
->map
->dso
->long_name
)))))
801 * We have to do this here as we may have a dso with no symbol
802 * hit that has a name longer than the ones with symbols
805 if (!sort_dso
.elide
&& !al
->map
->dso
->slen_calculated
)
806 dso__calc_col_width(al
->map
->dso
, &session
->hists
);
808 al
->sym
= map__find_symbol(al
->map
, al
->addr
, filter
);
810 const unsigned int unresolved_col_width
= BITS_PER_LONG
/ 4;
812 if (hists__col_len(&session
->hists
, HISTC_DSO
) < unresolved_col_width
&&
813 !symbol_conf
.col_width_list_str
&& !symbol_conf
.field_sep
&&
814 !symbol_conf
.dso_list
)
815 hists__set_col_len(&session
->hists
, HISTC_DSO
,
816 unresolved_col_width
);
819 if (symbol_conf
.sym_list
&& al
->sym
&&
820 !strlist__has_entry(symbol_conf
.sym_list
, al
->sym
->name
))
830 static int event__parse_id_sample(const event_t
*event
,
831 struct perf_session
*session
,
832 struct sample_data
*sample
)
837 sample
->cpu
= sample
->pid
= sample
->tid
= -1;
838 sample
->stream_id
= sample
->id
= sample
->time
= -1ULL;
840 if (!session
->sample_id_all
)
843 array
= event
->sample
.array
;
844 array
+= ((event
->header
.size
-
845 sizeof(event
->header
)) / sizeof(u64
)) - 1;
846 type
= session
->sample_type
;
848 if (type
& PERF_SAMPLE_CPU
) {
849 u32
*p
= (u32
*)array
;
854 if (type
& PERF_SAMPLE_STREAM_ID
) {
855 sample
->stream_id
= *array
;
859 if (type
& PERF_SAMPLE_ID
) {
864 if (type
& PERF_SAMPLE_TIME
) {
865 sample
->time
= *array
;
869 if (type
& PERF_SAMPLE_TID
) {
870 u32
*p
= (u32
*)array
;
878 int event__parse_sample(const event_t
*event
, struct perf_session
*session
,
879 struct sample_data
*data
)
884 if (event
->header
.type
!= PERF_RECORD_SAMPLE
)
885 return event__parse_id_sample(event
, session
, data
);
887 array
= event
->sample
.array
;
888 type
= session
->sample_type
;
890 if (type
& PERF_SAMPLE_IP
) {
891 data
->ip
= event
->ip
.ip
;
895 if (type
& PERF_SAMPLE_TID
) {
896 u32
*p
= (u32
*)array
;
902 if (type
& PERF_SAMPLE_TIME
) {
907 if (type
& PERF_SAMPLE_ADDR
) {
913 if (type
& PERF_SAMPLE_ID
) {
918 if (type
& PERF_SAMPLE_STREAM_ID
) {
919 data
->stream_id
= *array
;
923 if (type
& PERF_SAMPLE_CPU
) {
924 u32
*p
= (u32
*)array
;
930 if (type
& PERF_SAMPLE_PERIOD
) {
931 data
->period
= *array
;
935 if (type
& PERF_SAMPLE_READ
) {
936 pr_debug("PERF_SAMPLE_READ is unsuported for now\n");
940 if (type
& PERF_SAMPLE_CALLCHAIN
) {
941 data
->callchain
= (struct ip_callchain
*)array
;
942 array
+= 1 + data
->callchain
->nr
;
945 if (type
& PERF_SAMPLE_RAW
) {
946 u32
*p
= (u32
*)array
;