/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <lk/debugfs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include <unistd.h>

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
                       struct thread_map *threads)
{
        int i;

        for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
                INIT_HLIST_HEAD(&evlist->heads[i]);
        INIT_LIST_HEAD(&evlist->entries);
        perf_evlist__set_maps(evlist, cpus, threads);
        evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(void)
{
        struct perf_evlist *evlist = zalloc(sizeof(*evlist));

        if (evlist != NULL)
                perf_evlist__init(evlist, NULL, NULL);

        return evlist;
}

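/*
 * Illustrative lifecycle sketch (added commentary, not upstream code):
 * callers pair the constructor with perf_evlist__delete(), which purges
 * the evsels before freeing the list itself:
 *
 *	struct perf_evlist *evlist = perf_evlist__new();
 *
 *	if (evlist == NULL)
 *		return -ENOMEM;
 *	if (perf_evlist__add_default(evlist) < 0)
 *		goto out_delete;
 *	...
 * out_delete:
 *	perf_evlist__delete(evlist);
 */
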
void perf_evlist__config(struct perf_evlist *evlist,
                         struct perf_record_opts *opts)
{
        struct perf_evsel *evsel;

        /*
         * Set the evsel leader links before we configure attributes,
         * since some might depend on this info.
         */
        if (opts->group)
                perf_evlist__set_leader(evlist);

        if (evlist->cpus->map[0] < 0)
                opts->no_inherit = true;

        list_for_each_entry(evsel, &evlist->entries, node) {
                perf_evsel__config(evsel, opts);

                if (evlist->nr_entries > 1)
                        perf_evsel__set_sample_id(evsel);
        }
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
        struct perf_evsel *pos, *n;

        list_for_each_entry_safe(pos, n, &evlist->entries, node) {
                list_del_init(&pos->node);
                perf_evsel__delete(pos);
        }

        evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
        free(evlist->mmap);
        free(evlist->pollfd);
        evlist->mmap = NULL;
        evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
        perf_evlist__purge(evlist);
        perf_evlist__exit(evlist);
        free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
        list_add_tail(&entry->node, &evlist->entries);
        ++evlist->nr_entries;
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
                                   struct list_head *list,
                                   int nr_entries)
{
        list_splice_tail(list, &evlist->entries);
        evlist->nr_entries += nr_entries;
}

void __perf_evlist__set_leader(struct list_head *list)
{
        struct perf_evsel *evsel, *leader;

        leader = list_entry(list->next, struct perf_evsel, node);
        evsel = list_entry(list->prev, struct perf_evsel, node);

        leader->nr_members = evsel->idx - leader->idx + 1;

        list_for_each_entry(evsel, list, node) {
                evsel->leader = leader;
        }
}

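/*
 * Worked example (added commentary): if the group leader has idx == 2 and
 * the last entry on the list has idx == 5, nr_members = 5 - 2 + 1 = 4,
 * i.e. the leader counts itself among its members.
 */
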
void perf_evlist__set_leader(struct perf_evlist *evlist)
{
        if (evlist->nr_entries) {
                evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
                __perf_evlist__set_leader(&evlist->entries);
        }
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
        struct perf_event_attr attr = {
                .type   = PERF_TYPE_HARDWARE,
                .config = PERF_COUNT_HW_CPU_CYCLES,
        };
        struct perf_evsel *evsel;

        event_attr_init(&attr);

        evsel = perf_evsel__new(&attr, 0);
        if (evsel == NULL)
                goto error;

        /* use strdup() because free(evsel) assumes name is allocated */
        evsel->name = strdup("cycles");
        if (!evsel->name)
                goto error_free;

        perf_evlist__add(evlist, evsel);
        return 0;
error_free:
        perf_evsel__delete(evsel);
error:
        return -ENOMEM;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
                                  struct perf_event_attr *attrs, size_t nr_attrs)
{
        struct perf_evsel *evsel, *n;
        LIST_HEAD(head);
        size_t i;

        for (i = 0; i < nr_attrs; i++) {
                evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
                if (evsel == NULL)
                        goto out_delete_partial_list;
                list_add_tail(&evsel->node, &head);
        }

        perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

        return 0;

out_delete_partial_list:
        list_for_each_entry_safe(evsel, n, &head, node)
                perf_evsel__delete(evsel);
        return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
                                     struct perf_event_attr *attrs, size_t nr_attrs)
{
        size_t i;

        for (i = 0; i < nr_attrs; i++)
                event_attr_init(attrs + i);

        return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
        struct perf_evsel *evsel;

        list_for_each_entry(evsel, &evlist->entries, node) {
                if (evsel->attr.type      == PERF_TYPE_TRACEPOINT &&
                    (int)evsel->attr.config == id)
                        return evsel;
        }

        return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
                           const char *sys, const char *name, void *handler)
{
        struct perf_evsel *evsel;

        evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
        if (evsel == NULL)
                return -1;

        evsel->handler.func = handler;
        perf_evlist__add(evlist, evsel);
        return 0;
}

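/*
 * Illustrative usage sketch (added commentary; process_sched_switch is a
 * hypothetical caller-supplied callback):
 *
 *	if (perf_evlist__add_newtp(evlist, "sched", "sched_switch",
 *				   process_sched_switch) < 0)
 *		return -1;
 */
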
void perf_evlist__disable(struct perf_evlist *evlist)
{
        int cpu, thread;
        struct perf_evsel *pos;
        int nr_cpus = cpu_map__nr(evlist->cpus);
        int nr_threads = thread_map__nr(evlist->threads);

        for (cpu = 0; cpu < nr_cpus; cpu++) {
                list_for_each_entry(pos, &evlist->entries, node) {
                        if (!perf_evsel__is_group_leader(pos))
                                continue;
                        for (thread = 0; thread < nr_threads; thread++)
                                ioctl(FD(pos, cpu, thread),
                                      PERF_EVENT_IOC_DISABLE, 0);
                }
        }
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
        int cpu, thread;
        struct perf_evsel *pos;
        int nr_cpus = cpu_map__nr(evlist->cpus);
        int nr_threads = thread_map__nr(evlist->threads);

        for (cpu = 0; cpu < nr_cpus; cpu++) {
                list_for_each_entry(pos, &evlist->entries, node) {
                        if (!perf_evsel__is_group_leader(pos))
                                continue;
                        for (thread = 0; thread < nr_threads; thread++)
                                ioctl(FD(pos, cpu, thread),
                                      PERF_EVENT_IOC_ENABLE, 0);
                }
        }
}

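/*
 * Note (added commentary): both loops above deliberately skip non-leaders.
 * Events in a group are scheduled onto the PMU together with their leader,
 * so toggling the leader with PERF_EVENT_IOC_ENABLE/DISABLE effectively
 * gates every sibling; one ioctl per group suffices.
 */
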
static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
        int nr_cpus = cpu_map__nr(evlist->cpus);
        int nr_threads = thread_map__nr(evlist->threads);
        int nfds = nr_cpus * nr_threads * evlist->nr_entries;
        evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
        return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
        fcntl(fd, F_SETFL, O_NONBLOCK);
        evlist->pollfd[evlist->nr_fds].fd = fd;
        evlist->pollfd[evlist->nr_fds].events = POLLIN;
        evlist->nr_fds++;
}

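/*
 * Illustrative consumer sketch (added commentary, hypothetical caller
 * code): the descriptors registered here feed a classic poll loop:
 *
 *	while (!done) {
 *		poll(evlist->pollfd, evlist->nr_fds, -1);
 *		... drain each map with perf_evlist__mmap_read() ...
 *	}
 */
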
static void perf_evlist__id_hash(struct perf_evlist *evlist,
                                 struct perf_evsel *evsel,
                                 int cpu, int thread, u64 id)
{
        int hash;
        struct perf_sample_id *sid = SID(evsel, cpu, thread);

        sid->id = id;
        sid->evsel = evsel;
        hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
        hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
                         int cpu, int thread, u64 id)
{
        perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
        evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
                                  struct perf_evsel *evsel,
                                  int cpu, int thread, int fd)
{
        u64 read_data[4] = { 0, };
        int id_idx = 1; /* The first entry is the counter value */

        if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
            read(fd, &read_data, sizeof(read_data)) == -1)
                return -1;

        if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                ++id_idx;
        if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                ++id_idx;

        perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
        return 0;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
        struct hlist_head *head;
        struct hlist_node *pos;
        struct perf_sample_id *sid;
        int hash;

        if (evlist->nr_entries == 1)
                return perf_evlist__first(evlist);

        hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
        head = &evlist->heads[hash];

        hlist_for_each_entry(sid, pos, head, node)
                if (sid->id == id)
                        return sid->evsel;

        if (!perf_evlist__sample_id_all(evlist))
                return perf_evlist__first(evlist);

        return NULL;
}

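/*
 * Note (added commentary): the two fallbacks mirror how ids are produced.
 * With a single evsel no PERF_SAMPLE_ID is attached, and without
 * sample_id_all non-sample events carry no id either, so in both cases the
 * first evsel is the only possible match.
 */
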
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
        struct perf_mmap *md = &evlist->mmap[idx];
        unsigned int head = perf_mmap__read_head(md);
        unsigned int old = md->prev;
        unsigned char *data = md->base + page_size;
        union perf_event *event = NULL;

        if (evlist->overwrite) {
                /*
                 * If we're further behind than half the buffer, there's a chance
                 * the writer will bite our tail and mess up the samples under us.
                 *
                 * If we somehow ended up ahead of the head, we got messed up.
                 *
                 * In either case, truncate and restart at head.
                 */
                int diff = head - old;
                if (diff > md->mask / 2 || diff < 0) {
                        fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

                        /*
                         * head points to a known good entry, start there.
                         */
                        old = head;
                }
        }

        if (old != head) {
                size_t size;

                event = (union perf_event *)&data[old & md->mask];
                size = event->header.size;

                /*
                 * Event straddles the mmap boundary -- header should always
                 * be inside due to u64 alignment of output.
                 */
                if ((old & md->mask) + size != ((old + size) & md->mask)) {
                        unsigned int offset = old;
                        unsigned int len = min(sizeof(*event), size), cpy;
                        void *dst = &md->event_copy;

                        do {
                                cpy = min(md->mask + 1 - (offset & md->mask), len);
                                memcpy(dst, &data[offset & md->mask], cpy);
                                offset += cpy;
                                dst += cpy;
                                len -= cpy;
                        } while (len);

                        event = &md->event_copy;
                }

                old += size;
        }

        md->prev = old;

        if (!evlist->overwrite)
                perf_mmap__write_tail(md, old);

        return event;
}

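/*
 * Illustrative read loop (added commentary; process_event() stands in for
 * a tool-specific handler):
 *
 *	union perf_event *event;
 *
 *	while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
 *		struct perf_sample sample;
 *
 *		if (perf_evlist__parse_sample(evlist, event, &sample) == 0)
 *			process_event(event, &sample);
 *	}
 */
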
void perf_evlist__munmap(struct perf_evlist *evlist)
{
        int i;

        for (i = 0; i < evlist->nr_mmaps; i++) {
                if (evlist->mmap[i].base != NULL) {
                        munmap(evlist->mmap[i].base, evlist->mmap_len);
                        evlist->mmap[i].base = NULL;
                }
        }

        free(evlist->mmap);
        evlist->mmap = NULL;
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
        evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
        if (cpu_map__all(evlist->cpus))
                evlist->nr_mmaps = thread_map__nr(evlist->threads);
        evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
        return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
                               int idx, int prot, int mask, int fd)
{
        evlist->mmap[idx].prev = 0;
        evlist->mmap[idx].mask = mask;
        evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
                                      MAP_SHARED, fd, 0);
        if (evlist->mmap[idx].base == MAP_FAILED) {
                evlist->mmap[idx].base = NULL;
                return -1;
        }

        perf_evlist__add_pollfd(evlist, fd);
        return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
        struct perf_evsel *evsel;
        int cpu, thread;
        int nr_cpus = cpu_map__nr(evlist->cpus);
        int nr_threads = thread_map__nr(evlist->threads);

        for (cpu = 0; cpu < nr_cpus; cpu++) {
                int output = -1;

                for (thread = 0; thread < nr_threads; thread++) {
                        list_for_each_entry(evsel, &evlist->entries, node) {
                                int fd = FD(evsel, cpu, thread);

                                if (output == -1) {
                                        output = fd;
                                        if (__perf_evlist__mmap(evlist, cpu,
                                                                prot, mask, output) < 0)
                                                goto out_unmap;
                                } else {
                                        if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
                                                goto out_unmap;
                                }

                                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                                    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
                                        goto out_unmap;
                        }
                }
        }

        return 0;

out_unmap:
        for (cpu = 0; cpu < nr_cpus; cpu++) {
                if (evlist->mmap[cpu].base != NULL) {
                        munmap(evlist->mmap[cpu].base, evlist->mmap_len);
                        evlist->mmap[cpu].base = NULL;
                }
        }
        return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
        struct perf_evsel *evsel;
        int thread;
        int nr_threads = thread_map__nr(evlist->threads);

        for (thread = 0; thread < nr_threads; thread++) {
                int output = -1;

                list_for_each_entry(evsel, &evlist->entries, node) {
                        int fd = FD(evsel, 0, thread);

                        if (output == -1) {
                                output = fd;
                                if (__perf_evlist__mmap(evlist, thread,
                                                        prot, mask, output) < 0)
                                        goto out_unmap;
                        } else {
                                if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
                                        goto out_unmap;
                        }

                        if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                            perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
                                goto out_unmap;
                }
        }

        return 0;

out_unmap:
        for (thread = 0; thread < nr_threads; thread++) {
                if (evlist->mmap[thread].base != NULL) {
                        munmap(evlist->mmap[thread].base, evlist->mmap_len);
                        evlist->mmap[thread].base = NULL;
                }
        }
        return -1;
}

/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__read_on_cpu does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
                      bool overwrite)
{
        struct perf_evsel *evsel;
        const struct cpu_map *cpus = evlist->cpus;
        const struct thread_map *threads = evlist->threads;
        int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

        /* 512 kiB: default amount of unprivileged mlocked memory */
        if (pages == UINT_MAX)
                pages = (512 * 1024) / page_size;
        else if (!is_power_of_2(pages))
                return -EINVAL;

        mask = pages * page_size - 1;

        if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
                return -ENOMEM;

        if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
                return -ENOMEM;

        evlist->overwrite = overwrite;
        evlist->mmap_len = (pages + 1) * page_size;

        list_for_each_entry(evsel, &evlist->entries, node) {
                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                    evsel->sample_id == NULL &&
                    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
                        return -ENOMEM;
        }

        if (cpu_map__all(cpus))
                return perf_evlist__mmap_per_thread(evlist, prot, mask);

        return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}

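/*
 * Worked example (added commentary): with 4 kiB pages and pages == 128,
 * mask = 128 * 4096 - 1 = 0x7ffff and mmap_len = (128 + 1) * 4096; the
 * extra page in front of the data area is the kernel's control page
 * (struct perf_event_mmap_page), which is why mmap_len is pages + 1.
 */
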
int perf_evlist__create_maps(struct perf_evlist *evlist,
                             struct perf_target *target)
{
        evlist->threads = thread_map__new_str(target->pid, target->tid,
                                              target->uid);

        if (evlist->threads == NULL)
                return -1;

        if (perf_target__has_task(target))
                evlist->cpus = cpu_map__dummy_new();
        else if (!perf_target__has_cpu(target) && !target->uses_mmap)
                evlist->cpus = cpu_map__dummy_new();
        else
                evlist->cpus = cpu_map__new(target->cpu_list);

        if (evlist->cpus == NULL)
                goto out_delete_threads;

        return 0;

out_delete_threads:
        thread_map__delete(evlist->threads);
        return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
        cpu_map__delete(evlist->cpus);
        thread_map__delete(evlist->threads);
        evlist->cpus = NULL;
        evlist->threads = NULL;
}

int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;
        int err = 0;
        const int ncpus = cpu_map__nr(evlist->cpus),
                  nthreads = thread_map__nr(evlist->threads);

        list_for_each_entry(evsel, &evlist->entries, node) {
                if (evsel->filter == NULL)
                        continue;

                err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
                if (err)
                        break;
        }

        return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
        struct perf_evsel *evsel;
        int err = 0;
        const int ncpus = cpu_map__nr(evlist->cpus),
                  nthreads = thread_map__nr(evlist->threads);

        list_for_each_entry(evsel, &evlist->entries, node) {
                err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
                if (err)
                        break;
        }

        return err;
}

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

        list_for_each_entry_continue(pos, &evlist->entries, node) {
                if (first->attr.sample_type != pos->attr.sample_type)
                        return false;
        }

        return true;
}

u64 perf_evlist__sample_type(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist);
        return first->attr.sample_type;
}

u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist);
        struct perf_sample *data;
        u64 sample_type;
        u16 size = 0;

        if (!first->attr.sample_id_all)
                goto out;

        sample_type = first->attr.sample_type;

        if (sample_type & PERF_SAMPLE_TID)
                size += sizeof(data->tid) * 2;

        if (sample_type & PERF_SAMPLE_TIME)
                size += sizeof(data->time);

        if (sample_type & PERF_SAMPLE_ID)
                size += sizeof(data->id);

        if (sample_type & PERF_SAMPLE_STREAM_ID)
                size += sizeof(data->stream_id);

        if (sample_type & PERF_SAMPLE_CPU)
                size += sizeof(data->cpu) * 2;
out:
        return size;
}

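/*
 * Worked example (added commentary): with sample_id_all set and
 * sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ID |
 * PERF_SAMPLE_CPU, the trailer is 8 (pid + tid) + 8 (time) + 8 (id) +
 * 8 (cpu + res) = 32 bytes appended to every non-sample event.
 */
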
bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

        list_for_each_entry_continue(pos, &evlist->entries, node) {
                if (first->attr.sample_id_all != pos->attr.sample_id_all)
                        return false;
        }

        return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist);
        return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
                               struct perf_evsel *evsel)
{
        evlist->selected = evsel;
}

int perf_evlist__open(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;
        int err, ncpus, nthreads;

        list_for_each_entry(evsel, &evlist->entries, node) {
                err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
                if (err < 0)
                        goto out_err;
        }

        return 0;
out_err:
        ncpus = cpu_map__nr(evlist->cpus);
        nthreads = thread_map__nr(evlist->threads);

        list_for_each_entry_reverse(evsel, &evlist->entries, node)
                perf_evsel__close(evsel, ncpus, nthreads);

        errno = -err;
        return err;
}

int perf_evlist__prepare_workload(struct perf_evlist *evlist,
                                  struct perf_target *target,
                                  struct perf_record_opts *opts,
                                  const char *argv[])
{
        int child_ready_pipe[2], go_pipe[2];
        char bf;

        if (pipe(child_ready_pipe) < 0) {
                perror("failed to create 'ready' pipe");
                return -1;
        }

        if (pipe(go_pipe) < 0) {
                perror("failed to create 'go' pipe");
                goto out_close_ready_pipe;
        }

        evlist->workload.pid = fork();
        if (evlist->workload.pid < 0) {
                perror("failed to fork");
                goto out_close_pipes;
        }

        if (!evlist->workload.pid) {
                if (opts->pipe_output)
                        dup2(2, 1);

                close(child_ready_pipe[0]);
                close(go_pipe[1]);
                fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

                /*
                 * Do a dummy execvp to get the PLT entry resolved,
                 * so we avoid the resolver overhead on the real
                 * execvp call.
                 */
                execvp("", (char **)argv);

                /*
                 * Tell the parent we're ready to go
                 */
                close(child_ready_pipe[1]);

                /*
                 * Wait until the parent tells us to go.
                 */
                if (read(go_pipe[0], &bf, 1) == -1)
                        perror("unable to read pipe");

                execvp(argv[0], (char **)argv);

                perror(argv[0]);
                kill(getppid(), SIGUSR1);
                exit(-1);
        }

        if (perf_target__none(target))
                evlist->threads->map[0] = evlist->workload.pid;

        close(child_ready_pipe[1]);
        close(go_pipe[0]);
        /*
         * wait for child to settle
         */
        if (read(child_ready_pipe[0], &bf, 1) == -1) {
                perror("unable to read pipe");
                goto out_close_pipes;
        }

        evlist->workload.cork_fd = go_pipe[1];
        close(child_ready_pipe[0]);
        return 0;

out_close_pipes:
        close(go_pipe[0]);
        close(go_pipe[1]);
out_close_ready_pipe:
        close(child_ready_pipe[0]);
        close(child_ready_pipe[1]);
        return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
        if (evlist->workload.cork_fd > 0) {
                /*
                 * Remove the cork, let it rip!
                 */
                return close(evlist->workload.cork_fd);
        }

        return 0;
}

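/*
 * Illustrative sequencing sketch (added commentary, hypothetical caller
 * code): the workload is forked but parked on the "go" pipe, so counters
 * can be fully set up before the target executes a single instruction:
 *
 *	perf_evlist__prepare_workload(evlist, &target, &opts, argv);
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, opts.mmap_pages, false);
 *	perf_evlist__enable(evlist);
 *	perf_evlist__start_workload(evlist);
 */
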
int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
                              struct perf_sample *sample)
{
        struct perf_evsel *evsel = perf_evlist__first(evlist);
        return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
        struct perf_evsel *evsel;
        size_t printed = 0;

        list_for_each_entry(evsel, &evlist->entries, node) {
                printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
                                   perf_evsel__name(evsel));
        }

        return printed + fprintf(fp, "\n");
}