/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <lk/debugfs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include <unistd.h>

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
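/*
 * FD() yields the perf_event_open() file descriptor and SID() the
 * struct perf_sample_id slot for a given (evsel, cpu, thread) triple,
 * both stored in per-evsel xyarrays.
 */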
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}
struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}
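/*
 * Typical lifecycle, as a sketch (assumes cpu/thread maps are set up,
 * e.g. via perf_evlist__create_maps()):
 *
 *	struct perf_evlist *evlist = perf_evlist__new();
 *
 *	perf_evlist__add_default(evlist);		// cycles event
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, UINT_MAX, false);
 *	// ... consume events with perf_evlist__mmap_read() ...
 *	perf_evlist__munmap(evlist);
 *	perf_evlist__close(evlist);
 *	perf_evlist__delete(evlist);
 */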
void perf_evlist__config(struct perf_evlist *evlist,
			 struct perf_record_opts *opts)
{
	struct perf_evsel *evsel;
	/*
	 * Set the evsel leader links before we configure attributes,
	 * since some might depend on this info.
	 */
	if (opts->group)
		perf_evlist__set_leader(evlist);

	if (evlist->cpus->map[0] < 0)
		opts->no_inherit = true;

	list_for_each_entry(evsel, &evlist->entries, node) {
		perf_evsel__config(evsel, opts);

		if (evlist->nr_entries > 1)
			perf_evsel__set_sample_id(evsel);
	}
}
static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}
void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}
void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}
void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}
void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
}
void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	list_for_each_entry(evsel, list, node) {
		evsel->leader = leader;
	}
}
void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}
int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}
static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}
int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}
struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}
int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel;

	evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
	if (evsel == NULL)
		return -1;

	evsel->handler.func = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}
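/*
 * Example (sketch): wire a tracepoint to a handler; the handler name
 * below is a placeholder for whatever the calling tool defines:
 *
 *	if (perf_evlist__add_newtp(evlist, "sched", "sched_switch",
 *				   process_sched_switch) < 0)
 *		return -1;
 */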
void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos))
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}
void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos))
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}
static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = nr_cpus * nr_threads * evlist->nr_entries;

	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}
void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}
void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}
static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
	return 0;
}
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid->evsel;

	/* Synthesized events have an id of zero */
	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}
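/*
 * Consumer loop sketch for a non-overwrite mmap at index idx:
 *
 *	union perf_event *event;
 *	struct perf_sample sample;
 *
 *	while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
 *		if (perf_evlist__parse_sample(evlist, event, &sample) == 0)
 *			deliver(event, &sample);	// tool-specific
 *	}
 */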
void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		if (evlist->mmap[i].base != NULL) {
			munmap(evlist->mmap[i].base, evlist->mmap_len);
			evlist->mmap[i].base = NULL;
		}
	}

	free(evlist->mmap);
	evlist->mmap = NULL;
}
static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__all(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}
static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}
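/*
 * Only the first fd on each map slot gets a real mmap; the remaining
 * events are redirected into it with PERF_EVENT_IOC_SET_OUTPUT below,
 * so there is one ring buffer per cpu (or per thread), not per counter.
 */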
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
	return -1;
}
static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < nr_threads; thread++) {
		if (evlist->mmap[thread].base != NULL) {
			munmap(evlist->mmap[thread].base, evlist->mmap_len);
			evlist->mmap[thread].base = NULL;
		}
	}
	return -1;
}
/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head);
 *
 * Using perf_evlist__mmap_read() does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return -EINVAL;

	mask = pages * page_size - 1;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__all(cpus))
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}
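/*
 * Note that mmap_len is (pages + 1) * page_size: the extra page is the
 * struct perf_event_mmap_page control page at the start of each map,
 * which is why perf_evlist__mmap_read() reads data from base + page_size.
 */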
int perf_evlist__create_maps(struct perf_evlist *evlist,
			     struct perf_target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (perf_target__has_task(target))
		evlist->cpus = cpu_map__dummy_new();
	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}
void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}
int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err)
			break;
	}

	return err;
}
int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}
bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_type != pos->attr.sample_type)
			return false;
	}

	return true;
}
u64 perf_evlist__sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	return first->attr.sample_type;
}
u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;
out:
	return size;
}
bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}
bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	return first->attr.sample_id_all;
}
void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}
void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);
}
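/*
 * Open every event in the list; on the first failure close whatever
 * was already opened, so the caller never sees a half-opened evlist.
 */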
int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}
int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_target *target,
				  const char *argv[], bool pipe_output,
				  bool want_signal)
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Do a dummy execvp to get the PLT entry resolved,
		 * so we avoid the resolver overhead on the real
		 * execvp call.
		 */
		execvp("", (char **)argv);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		if (want_signal)
			kill(getppid(), SIGUSR1);
		exit(-1);
	}

	if (perf_target__none(target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}
int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		/*
		 * Remove the cork, let it rip!
		 */
		return close(evlist->workload.cork_fd);
	}

	return 0;
}
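/*
 * Workload sketch: prepare_workload() forks the workload stopped on the
 * "go" pipe so its pid can be used while opening the events; uncork it
 * only once everything is armed:
 *
 *	perf_evlist__prepare_workload(evlist, &target, argv, false, true);
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, mmap_pages, false);	// mmap_pages: tool option
 *	perf_evlist__start_workload(evlist);		// closes the cork fd
 */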
int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__first(evlist);

	return perf_evsel__parse_sample(evsel, event, sample);
}
size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	list_for_each_entry(evsel, &evlist->entries, node) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}