/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "evlist.h"
#include "evsel.h"
#include "util.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->id, x, y)

void perf_evlist__init(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist);

	return evlist;
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel = perf_evsel__new(&attr, 0);

	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);
	return 0;
}

int perf_evlist__alloc_pollfd(struct perf_evlist *evlist, int ncpus, int nthreads)
{
	int nfds = ncpus * nthreads * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

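/*
 * Illustrative sketch, not part of the original file: once fds have been
 * registered via perf_evlist__add_pollfd(), a consumer can block until any
 * ring has data with a plain poll(2). The function name and the timeout
 * parameter are hypothetical; only the pollfd/nr_fds fields come from the
 * code above.
 */
static int example__poll(struct perf_evlist *evlist, int timeout_msecs)
{
	/* Wait for POLLIN on every registered counter fd. */
	return poll(evlist->pollfd, evlist->nr_fds, timeout_msecs);
}
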
static int perf_evlist__id_hash(struct perf_evlist *evlist, struct perf_evsel *evsel,
				int cpu, int thread, int fd)
{
	struct perf_sample_id *sid;
	u64 read_data[4] = { 0, };
	int hash, id_idx = 1; /* The first entry is the counter value */

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	sid = SID(evsel, cpu, thread);
	sid->id = read_data[id_idx];
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
	return 0;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct perf_sample_id *sid;
	int hash;

	if (evlist->nr_entries == 1)
		return list_entry(evlist->entries.next, struct perf_evsel, node);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, pos, head, node)
		if (sid->id == id)
			return sid->evsel;
	return NULL;
}

union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *evlist, int cpu)
{
	/* XXX Move this to perf.c, making it generally available */
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_mmap *md = &evlist->mmap[cpu];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &evlist->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &evlist->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}

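/*
 * Illustrative sketch, not part of the original file: draining one cpu's
 * ring with perf_evlist__read_on_cpu(). A NULL return means the buffer is
 * empty; handle_event() is a hypothetical stand-in for whatever the caller
 * does with each record.
 */
static void example__drain_cpu(struct perf_evlist *evlist, int cpu)
{
	union perf_event *event;

	while ((event = perf_evlist__read_on_cpu(evlist, cpu)) != NULL)
		handle_event(event); /* hypothetical callback */
}
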
void perf_evlist__munmap(struct perf_evlist *evlist, int ncpus)
{
	int cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
}

int perf_evlist__alloc_mmap(struct perf_evlist *evlist, int ncpus)
{
	evlist->mmap = zalloc(ncpus * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist, int cpu, int prot,
			       int mask, int fd)
{
	evlist->mmap[cpu].prev = 0;
	evlist->mmap[cpu].mask = mask;
	evlist->mmap[cpu].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[cpu].base == MAP_FAILED)
		return -1;

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @cpus - cpu map being monitored
 * @threads - threads map being monitored
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 */
int perf_evlist__mmap(struct perf_evlist *evlist, struct cpu_map *cpus,
		      struct thread_map *threads, int pages, bool overwrite)
{
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	int mask = pages * page_size - 1, cpu;
	struct perf_evsel *first_evsel, *evsel;
	int thread, prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);

	if (evlist->mmap == NULL &&
	    perf_evlist__alloc_mmap(evlist, cpus->nr) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL &&
	    perf_evlist__alloc_pollfd(evlist, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;
	first_evsel = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->id == NULL &&
		    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
			return -ENOMEM;

		for (cpu = 0; cpu < cpus->nr; cpu++) {
			for (thread = 0; thread < threads->nr; thread++) {
				int fd = FD(evsel, cpu, thread);

				if (evsel->idx || thread) {
					/*
					 * Only the first counter per cpu gets
					 * its own mmap; every other fd is
					 * redirected into that ring.
					 */
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT,
						  FD(first_evsel, cpu, 0)) != 0)
						goto out_unmap;
				} else if (__perf_evlist__mmap(evlist, cpu, prot, mask, fd) < 0)
					goto out_unmap;

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_hash(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
	return -1;
}
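
/*
 * Illustrative sketch, not part of the original file: the intended call
 * sequence end to end. Assumes the evsels have already been opened on the
 * kernel side (e.g. with perf_evsel__open()); the cpu_map__new() and
 * thread_map__new() arguments and the page count are assumptions, and
 * error paths leak for brevity.
 */
static int example__record_cycles(pid_t pid)
{
	struct perf_evlist *evlist = perf_evlist__new();
	struct cpu_map *cpus = cpu_map__new(NULL);	/* all online cpus */
	struct thread_map *threads = thread_map__new(pid, -1);
	union perf_event *event;
	int cpu;

	if (evlist == NULL || cpus == NULL || threads == NULL)
		return -ENOMEM;

	if (perf_evlist__add_default(evlist) < 0)	/* HW cycles counter */
		return -ENOMEM;

	/* ... open the counters here, then: */

	if (perf_evlist__mmap(evlist, cpus, threads, 128, false) < 0)
		return -1;

	for (cpu = 0; cpu < cpus->nr; cpu++)
		while ((event = perf_evlist__read_on_cpu(evlist, cpu)) != NULL)
			; /* consume the record */

	perf_evlist__munmap(evlist, cpus->nr);
	perf_evlist__delete(evlist);
	return 0;
}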