/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include <byteswap.h>
#include <linux/bitops.h>
#include "asm/bug.h"
#include "debugfs.h"
#include "event-parse.h"
#include "evsel.h"
#include "evlist.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include "perf_regs.h"
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

static int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}
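
/*
 * Illustrative sketch (not part of the original file): the fixed part
 * of a sample is one u64 per bit set in sample_type, so IP + TID +
 * TIME comes out as 3 * sizeof(u64) = 24 bytes.
 */
#if 0
static void example_sample_size(void)
{
	u64 st = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME;

	assert(__perf_evsel__sample_size(st) == 3 * sizeof(u64));
}
#endif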
void hists__init(struct hists *hists)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
}
void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
				  enum perf_event_sample_format bit)
{
	if (!(evsel->attr.sample_type & bit)) {
		evsel->attr.sample_type |= bit;
		evsel->sample_size += sizeof(u64);
	}
}
void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
				    enum perf_event_sample_format bit)
{
	if (evsel->attr.sample_type & bit) {
		evsel->attr.sample_type &= ~bit;
		evsel->sample_size -= sizeof(u64);
	}
}
void perf_evsel__set_sample_id(struct perf_evsel *evsel)
{
	perf_evsel__set_sample_bit(evsel, ID);
	evsel->attr.read_format |= PERF_FORMAT_ID;
}
void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx	= idx;
	evsel->attr	= *attr;
	evsel->leader	= evsel;
	INIT_LIST_HEAD(&evsel->node);
	hists__init(&evsel->hists);
	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
}
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	return evsel;
}
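
/*
 * Illustrative sketch (not part of the original file): allocating an
 * evsel for the hardware "cycles" event; the attr encoding is the
 * standard perf_event ABI, and idx 0 assumes the first slot in an
 * evlist.
 */
#if 0
static struct perf_evsel *example_new_cycles(void)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
	};

	return perf_evsel__new(&attr, 0);
}
#endif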
struct event_format *event_format__new(const char *sys, const char *name)
{
	int fd, n;
	char *filename;
	void *bf = NULL, *nbf;
	size_t size = 0, alloc_size = 0;
	struct event_format *format = NULL;

	if (asprintf(&filename, "%s/%s/%s/format", tracing_events_path, sys, name) < 0)
		goto out;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out_free_filename;

	do {
		if (size == alloc_size) {
			alloc_size += BUFSIZ;
			nbf = realloc(bf, alloc_size);
			if (nbf == NULL)
				goto out_free_bf;
			bf = nbf;
		}

		n = read(fd, bf + size, BUFSIZ);
		if (n < 0)
			goto out_free_bf;
		size += n;
	} while (n > 0);

	pevent_parse_format(&format, bf, size, sys);

out_free_bf:
	free(bf);
	close(fd);
out_free_filename:
	free(filename);
out:
	return format;
}
struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL) {
		struct perf_event_attr attr = {
			.type	     = PERF_TYPE_TRACEPOINT,
			.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		};

		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
			goto out_free;

		evsel->tp_format = event_format__new(sys, name);
		if (evsel->tp_format == NULL)
			goto out_free;

		event_attr_init(&attr);
		attr.config = evsel->tp_format->id;
		attr.sample_period = 1;
		perf_evsel__init(evsel, &attr, idx);
	}

	return evsel;

out_free:
	free(evsel->name);
	free(evsel);
	return NULL;
}
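
/*
 * Illustrative sketch (not part of the original file): building a
 * tracepoint evsel. "sched"/"sched_switch" are example names; any
 * sys/name pair with a format file under tracing/events/ works the
 * same way.
 */
#if 0
static struct perf_evsel *example_newtp(void)
{
	return perf_evsel__newtp("sched", "sched_switch", 0);
}
#endif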
const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles", "instructions", "cache-references", "cache-misses",
	"branches", "branch-misses", "bus-cycles",
	"stalled-cycles-frontend", "stalled-cycles-backend", "ref-cycles",
};
static const char *__perf_evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
		return perf_evsel__hw_names[config];

	return "unknown-hardware";
}
static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod)	do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		} } while(0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';
	return r;
}
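
/*
 * Illustrative sketch (not part of the original file): with
 * exclude_user and exclude_hv set, MOD_PRINT only emits 'k', so a
 * hardware "cycles" event renders as "cycles:k".
 */
#if 0
static void example_modifiers(struct perf_evsel *evsel)
{
	char bf[64];
	int r = scnprintf(bf, sizeof(bf), "%s",
			  __perf_evsel__hw_name(evsel->attr.config));

	evsel->attr.exclude_user = 1;
	evsel->attr.exclude_hv	 = 1;
	perf_evsel__add_modifiers(evsel, bf + r, sizeof(bf) - r);
	/* bf == "cycles:k" for a PERF_COUNT_HW_CPU_CYCLES attr */
}
#endif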
static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock", "task-clock", "page-faults", "context-switches",
	"cpu-migrations", "minor-faults", "major-faults",
	"alignment-faults", "emulation-faults",
};
static const char *__perf_evsel__sw_name(u64 config)
{
	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
		return perf_evsel__sw_names[config];
	return "unknown-software";
}
static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
	int r;

	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");

	return r;
}
static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	struct perf_event_attr *attr = &evsel->attr;
	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
 { "L1-dcache", "l1-d",     "l1d",      "L1-data",        },
 { "L1-icache", "l1-i",     "l1i",      "L1-instruction", },
 { "LLC",       "L2",                                     },
 { "dTLB",      "d-tlb",    "Data-TLB",                   },
 { "iTLB",      "i-tlb",    "Instruction-TLB",            },
 { "branch",    "branches", "bpu",      "btb",   "bpc",   },
 { "node",                                                },
};
const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
 { "load",     "loads",      "read",                                  },
 { "store",    "stores",     "write",                                 },
 { "prefetch", "prefetches", "speculative-read", "speculative-load",  },
};
const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				       [PERF_EVSEL__MAX_ALIASES] = {
 { "refs",   "Reference", "ops", "access", },
 { "misses", "miss",                       },
};
#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)
/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
 [C(L1D)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)]	= (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)]	= (CACHE_READ),
 [C(BPU)]	= (CACHE_READ),
 [C(NODE)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};
bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
	if (perf_evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */
	else
		return false;	/* invalid */
}
int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
				 perf_evsel__hw_cache_op[op][0],
				 perf_evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
			 perf_evsel__hw_cache_op[op][1]);
}
static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
	u8 op, result, type = (config >>  0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	if (type > PERF_COUNT_HW_CACHE_MAX)
		goto out_err;

	op = (config >>  8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op > PERF_COUNT_HW_CACHE_OP_MAX)
		goto out_err;

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result > PERF_COUNT_HW_CACHE_RESULT_MAX)
		goto out_err;

	err = "invalid-cache";
	if (!perf_evsel__is_cache_op_valid(type, op))
		goto out_err;

	return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
	return scnprintf(bf, size, "%s", err);
}
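
/*
 * Illustrative sketch (not part of the original file): the config
 * encoding is type | (op << 8) | (result << 16), per the perf_event
 * ABI, so an L1D read miss formats as "L1-dcache-load-misses".
 */
#if 0
static void example_cache_name(void)
{
	char bf[128];
	u64 config = PERF_COUNT_HW_CACHE_L1D |
		     (PERF_COUNT_HW_CACHE_OP_READ << 8) |
		     (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);

	__perf_evsel__hw_cache_name(config, bf, sizeof(bf));
	/* bf == "L1-dcache-load-misses" */
}
#endif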
static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}
static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}
const char *perf_evsel__name(struct perf_evsel *evsel)
{
	char bf[128];

	if (evsel->name)
		return evsel->name;

	switch (evsel->attr.type) {
	case PERF_TYPE_RAW:
		perf_evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		perf_evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		perf_evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
			  evsel->attr.type);
		break;
	}

	evsel->name = strdup(bf);

	return evsel->name ?: "unknown";
}
/*
 * The enable_on_exec/disabled value strategy:
 *
 *  1) For any type of traced program:
 *    - all independent events and group leaders are disabled
 *    - all group members are enabled
 *
 *     Group members are ruled by group leaders. They need to
 *     be enabled, because the group scheduling relies on that.
 *
 *  2) For traced programs executed by perf:
 *    - all independent events and group leaders have
 *      enable_on_exec set
 *    - we don't specifically enable or disable any event during
 *      the record command
 *
 *     Independent events and group leaders are initially disabled
 *     and get enabled by exec. Group members are ruled by group
 *     leaders as stated in 1).
 *
 *  3) For traced programs attached by perf (pid/tid):
 *    - we specifically enable or disable all events during
 *      the record command
 *
 *     When attaching events to already running traced programs we
 *     enable/disable events specifically, as there's no
 *     initial traced exec call.
 */
void perf_evsel__config(struct perf_evsel *evsel,
			struct perf_record_opts *opts)
{
	struct perf_event_attr *attr = &evsel->attr;
	int track = !evsel->idx; /* only the first counter needs these */

	attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;

	perf_evsel__set_sample_bit(evsel, IP);
	perf_evsel__set_sample_bit(evsel, TID);

	/*
	 * We default some events to a sample period of 1, but keep this
	 * a weak assumption that the user can override.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			perf_evsel__set_sample_bit(evsel, PERIOD);
			attr->freq	  = 1;
			attr->sample_freq = opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		perf_evsel__set_sample_bit(evsel, ADDR);
		attr->mmap_data = track;
	}

	if (opts->call_graph) {
		perf_evsel__set_sample_bit(evsel, CALLCHAIN);

		if (opts->call_graph == CALLCHAIN_DWARF) {
			perf_evsel__set_sample_bit(evsel, REGS_USER);
			perf_evsel__set_sample_bit(evsel, STACK_USER);
			attr->sample_regs_user = PERF_REGS_MASK;
			attr->sample_stack_user = opts->stack_dump_size;
			attr->exclude_callchain_user = 1;
		}
	}

	if (perf_target__has_cpu(&opts->target))
		perf_evsel__set_sample_bit(evsel, CPU);

	if (opts->period)
		perf_evsel__set_sample_bit(evsel, PERIOD);

	if (!opts->sample_id_all_missing &&
	    (opts->sample_time || !opts->no_inherit ||
	     perf_target__has_cpu(&opts->target)))
		perf_evsel__set_sample_bit(evsel, TIME);

	if (opts->raw_samples) {
		perf_evsel__set_sample_bit(evsel, TIME);
		perf_evsel__set_sample_bit(evsel, RAW);
		perf_evsel__set_sample_bit(evsel, CPU);
	}

	if (opts->no_delay) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}

	if (opts->branch_stack) {
		perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type = opts->branch_stack;
	}

	attr->mmap = track;
	attr->comm = track;

	/*
	 * XXX see the function comment above
	 *
	 * Disabling only independent events or group leaders,
	 * keeping group members enabled.
	 */
	if (perf_evsel__is_group_leader(evsel))
		attr->disabled = 1;

	/*
	 * Setting enable_on_exec for independent events and
	 * group leaders for traced programs executed by perf.
	 */
	if (perf_target__none(&opts->target) && perf_evsel__is_group_leader(evsel))
		attr->enable_on_exec = 1;
}
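
/*
 * Illustrative sketch (not part of the original file): for a workload
 * that perf will exec (an empty target), a group leader that went
 * through perf_evsel__config() above ends up with disabled = 1 and
 * enable_on_exec = 1, so counting starts exactly at the traced
 * program's exec. The opts argument is assumed to describe a typical
 * record session.
 */
#if 0
static void example_config(struct perf_evsel *evsel,
			   struct perf_record_opts *opts)
{
	perf_evsel__config(evsel, opts);

	if (evsel->attr.disabled && evsel->attr.enable_on_exec)
		fprintf(stderr, "armed to start counting on exec\n");
}
#endif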
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}
int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
			   const char *filter)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			int fd = FD(evsel, cpu, thread),
			    err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);

			if (err)
				return err;
		}
	}

	return 0;
}
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}
void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}
void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	free(evsel->id);
	evsel->id = NULL;
}
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}
void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	xyarray__delete(evsel->fd);
	xyarray__delete(evsel->sample_id);
	free(evsel->id);
}
void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	close_cgroup(evsel->cgrp);
	free(evsel->group_name);
	if (evsel->tp_format)
		pevent_free_format(evsel->tp_format);
	free(evsel->name);
	free(evsel);
}
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}
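
/*
 * Illustrative sketch (not part of the original file): the scaling
 * above extrapolates a multiplexed counter as val * ena / run, rounded
 * to nearest; e.g. val = 1000, ena = 200, run = 100 scales to 2000.
 */
#if 0
static u64 example_scale(u64 val, u64 ena, u64 run)
{
	if (run == 0)
		return 0;	/* counter never ran */
	if (run < ena)		/* ran for only part of the window */
		return (u64)((double)val * ena / run + 0.5);
	return val;		/* ran the whole time: no scaling */
}
#endif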
int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}
static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
{
	struct perf_evsel *leader = evsel->leader;
	int fd;

	if (perf_evsel__is_group_leader(evsel))
		return -1;

	/*
	 * Leader must be already processed/open,
	 * if not it's a bug.
	 */
	BUG_ON(!leader->fd);

	fd = FD(leader, cpu, thread);
	BUG_ON(fd == -1);

	return fd;
}
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads)
{
	int cpu, thread;
	unsigned long flags = 0;
	int pid = -1, err;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags = PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

	for (cpu = 0; cpu < cpus->nr; cpu++) {

		for (thread = 0; thread < threads->nr; thread++) {
			int group_fd;

			if (!evsel->cgrp)
				pid = threads->map[thread];

			group_fd = get_group_fd(evsel, cpu, thread);

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0) {
				err = -errno;
				goto out_close;
			}
		}
	}

	return 0;

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return err;
}
void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel, ncpus, nthreads);
	perf_evsel__free_fd(evsel);
}
static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr	= 1,
	.cpus	= { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads);
}
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
}
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
}
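
/*
 * Illustrative sketch (not part of the original file): opening an
 * evsel on a single thread. thread_map__new_by_tid() is assumed from
 * thread_map.h; getpid() stands in for the tid of interest.
 */
#if 0
static int example_open_self(struct perf_evsel *evsel)
{
	struct thread_map *threads = thread_map__new_by_tid(getpid());

	if (threads == NULL)
		return -ENOMEM;

	return perf_evsel__open_per_thread(evsel, threads);
}
#endif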
static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
				       const union perf_event *event,
				       struct perf_sample *sample)
{
	u64 type = evsel->attr.sample_type;
	const u64 *array = event->sample.array;
	bool swapped = evsel->needs_swap;
	union u64_swap u;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		sample->cpu = u.val32[0];
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		sample->pid = u.val32[0];
		sample->tid = u.val32[1];
	}

	return 0;
}
static bool sample_overlap(const union perf_event *event,
			   const void *offset, u64 size)
{
	const void *base = event;

	if (offset + size > base + event->header.size)
		return true;

	return false;
}
int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
			     struct perf_sample *data)
{
	u64 type = evsel->attr.sample_type;
	u64 regs_user = evsel->attr.sample_regs_user;
	bool swapped = evsel->needs_swap;
	const u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;
	data->period = 1;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!evsel->attr.sample_id_all)
			return 0;
		return perf_evsel__parse_id_sample(evsel, event, data);
	}

	array = event->sample.array;

	if (evsel->sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
		return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		if (sample_overlap(event, array, sizeof(data->callchain->nr)))
			return -EFAULT;

		data->callchain = (struct ip_callchain *)array;

		if (sample_overlap(event, array, data->callchain->nr))
			return -EFAULT;

		array += 1 + data->callchain->nr;
	}

	if (type & PERF_SAMPLE_RAW) {
		const u64 *pdata;

		u.val64 = *array;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		if (sample_overlap(event, array, sizeof(u32)))
			return -EFAULT;

		data->raw_size = u.val32[0];
		pdata = (void *) array + sizeof(u32);

		if (sample_overlap(event, pdata, data->raw_size))
			return -EFAULT;

		data->raw_data = (void *) pdata;

		array = (void *)array + data->raw_size + sizeof(u32);
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		u64 sz;

		data->branch_stack = (struct branch_stack *)array;
		array++; /* nr */

		sz = data->branch_stack->nr * sizeof(struct branch_entry);
		sz /= sizeof(u64);
		array += sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		/* First u64 tells us if we have any regs in sample. */
		u64 avail = *array++;

		if (avail) {
			data->user_regs.regs = (u64 *)array;
			array += hweight_long(regs_user);
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		u64 size = *array++;

		data->user_stack.offset = ((char *)(array - 1)
					  - (char *) event);

		if (!size) {
			data->user_stack.size = 0;
		} else {
			data->user_stack.data = (char *)array;
			array += size / sizeof(*array);
			data->user_stack.size = *array;
		}
	}

	return 0;
}
int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  const struct perf_sample *sample,
				  bool swapped)
{
	u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IP) {
		event->ip.ip = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}

		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	return 0;
}
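
/*
 * Illustrative sketch (not part of the original file): writing a
 * sample back out with the synthesizer. The caller must supply an
 * event buffer large enough for the chosen sample_type; the type
 * combination here is an assumption.
 */
#if 0
static void example_synthesize(union perf_event *event,
			       const struct perf_sample *sample)
{
	u64 type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME;

	perf_event__synthesize_sample(event, type, sample,
				      false /* not byte-swapped */);
}
#endif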
struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
{
	return pevent_find_field(evsel->tp_format, name);
}
void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
			 const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	int offset;

	if (!field)
		return NULL;

	offset = field->offset;

	if (field->flags & FIELD_IS_DYNAMIC) {
		offset = *(int *)(sample->raw_data + field->offset);
		offset &= 0xffff;
	}

	return sample->raw_data + offset;
}
u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
		       const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	void *ptr;
	u64 value;

	if (!field)
		return 0;

	ptr = sample->raw_data + field->offset;

	switch (field->size) {
	case 1:
		return *(u8 *)ptr;
	case 2:
		value = *(u16 *)ptr;
		break;
	case 4:
		value = *(u32 *)ptr;
		break;
	case 8:
		value = *(u64 *)ptr;
		break;
	default:
		return 0;
	}

	if (!evsel->needs_swap)
		return value;

	switch (field->size) {
	case 2:
		return bswap_16(value);
	case 4:
		return bswap_32(value);
	case 8:
		return bswap_64(value);
	default:
		return 0;
	}

	return 0;
}
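
/*
 * Illustrative sketch (not part of the original file): pulling typed
 * fields out of a tracepoint sample. "prev_pid"/"next_pid" are fields
 * of the example sched:sched_switch event format.
 */
#if 0
static void example_intval(struct perf_evsel *evsel,
			   struct perf_sample *sample)
{
	u64 prev = perf_evsel__intval(evsel, sample, "prev_pid");
	u64 next = perf_evsel__intval(evsel, sample, "next_pid");

	printf("sched_switch: %" PRIu64 " -> %" PRIu64 "\n", prev, next);
}
#endif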
static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
{
	va_list args;
	int ret = 0;

	if (!*first) {
		ret += fprintf(fp, ",");
	} else {
		ret += fprintf(fp, ":");
		*first = false;
	}

	va_start(args, fmt);
	ret += vfprintf(fp, fmt, args);
	va_end(args);
	return ret;
}
static int __if_fprintf(FILE *fp, bool *first, const char *field, u64 value)
{
	if (value == 0)
		return 0;

	return comma_fprintf(fp, first, " %s: %" PRIu64, field, value);
}
#define if_print(field) printed += __if_fprintf(fp, &first, #field, evsel->attr.field)

int perf_evsel__fprintf(struct perf_evsel *evsel,
			struct perf_attr_details *details, FILE *fp)
{
	bool first = true;
	int printed = fprintf(fp, "%s", perf_evsel__name(evsel));

	if (details->verbose || details->freq) {
		printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64,
					 (u64)evsel->attr.sample_freq);
	}

	if (details->verbose) {
		if_print(type);
		if_print(config);
		if_print(size);
		if_print(sample_type);
		if_print(read_format);
		if_print(disabled);
		if_print(inherit);
		if_print(pinned);
		if_print(exclusive);
		if_print(exclude_user);
		if_print(exclude_kernel);
		if_print(exclude_hv);
		if_print(exclude_idle);
		if_print(mmap);
		if_print(comm);
		if_print(freq);
		if_print(inherit_stat);
		if_print(enable_on_exec);
		if_print(task);
		if_print(watermark);
		if_print(precise_ip);
		if_print(mmap_data);
		if_print(sample_id_all);
		if_print(exclude_host);
		if_print(exclude_guest);
		if_print(__reserved_1);
		if_print(wakeup_events);
		if_print(bp_type);
		if_print(branch_sample_type);
	}

	fputc('\n', fp);
	return ++printed;
}