#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
+static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx);
+
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{

	return event;
}
+static bool perf_mmap__empty(struct perf_mmap *md)
+{
+	return perf_mmap__read_head(md) == md->prev;
+}
+
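The test reads as: the ring is empty exactly when the kernel's write pointer, perf_mmap__read_head(), has been caught up by md->prev, the position the tool last consumed. A minimal standalone model of that invariant; struct cursors and ring_empty() are illustrative names, not part of the perf API:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the two ring-buffer cursors: the kernel advances `head`
 * as it writes events, the tool advances `prev` as it consumes them. */
struct cursors {
	unsigned long head;	/* kernel write pointer */
	unsigned long prev;	/* last position consumed by the tool */
};

static bool ring_empty(const struct cursors *c)
{
	return c->head == c->prev;	/* reader caught up with writer */
}

int main(void)
{
	struct cursors c = { .head = 3, .prev = 0 };

	printf("%d\n", ring_empty(&c));	/* 0: events still pending */
	c.prev = c.head;		/* consume everything */
	printf("%d\n", ring_empty(&c));	/* 1: drained */
	return 0;
}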
+static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx)
+{
+	++evlist->mmap[idx].refcnt;
+}
+
+static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx)
+{
+	BUG_ON(evlist->mmap[idx].refcnt == 0);
+
+	if (--evlist->mmap[idx].refcnt == 0)
+		__perf_evlist__munmap(evlist, idx);
+}
+
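The pair above is plain reference counting over one mmap'ed ring. A self-contained sketch of the same discipline, with a hypothetical mmap_slot type standing in for struct perf_mmap, and free()/malloc() standing in for munmap()/mmap() so it runs anywhere:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the evlist's per-index mmap slot. */
struct mmap_slot {
	void *base;
	int   refcnt;
};

static void slot_unmap(struct mmap_slot *s)	/* ~__perf_evlist__munmap() */
{
	free(s->base);
	s->base = NULL;
	s->refcnt = 0;
}

static void slot_get(struct mmap_slot *s)	/* ~perf_evlist__mmap_get() */
{
	++s->refcnt;
}

static void slot_put(struct mmap_slot *s)	/* ~perf_evlist__mmap_put() */
{
	assert(s->refcnt != 0);			/* mirrors the BUG_ON() */
	if (--s->refcnt == 0)
		slot_unmap(s);
}

int main(void)
{
	struct mmap_slot s = { .base = malloc(4096), .refcnt = 2 };

	slot_put(&s);	/* e.g. the fd got POLLHUP and was filtered out */
	printf("mapped: %d\n", s.base != NULL);	/* 1: still readable */
	slot_put(&s);	/* last consumer done: mapping goes away */
	printf("mapped: %d\n", s.base != NULL);	/* 0 */
	return 0;
}

Starting at two references, as the mmap setup further down does, means neither a hung-up fd nor a drained reader alone tears the mapping down; only the second put does.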
void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
+	struct perf_mmap *md = &evlist->mmap[idx];
+
	if (!evlist->overwrite) {
-		struct perf_mmap *md = &evlist->mmap[idx];
		unsigned int old = md->prev;
		perf_mmap__write_tail(md, old);
	}
+
+	if (md->refcnt == 1 && perf_mmap__empty(md))
+		perf_evlist__mmap_put(evlist, idx);
}
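Tools call perf_evlist__mmap_consume() after each event they pull out of the ring, so the final reference can only drop once every last event has been read. A sketch of that usual consumer loop, paired with the existing perf_evlist__mmap_read() reader; handle() is a hypothetical stand-in for tool-specific processing:

union perf_event *event;

while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
	handle(event);		/* hypothetical: whatever the tool does */

	/* Advances the tail for !overwrite maps and, once refcnt == 1
	 * and the ring is empty, drops the last reference, which
	 * unmaps the ring via __perf_evlist__munmap(). */
	perf_evlist__mmap_consume(evlist, idx);
}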

static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
	if (evlist->mmap[idx].base != NULL) {
		munmap(evlist->mmap[idx].base, evlist->mmap_len);
		evlist->mmap[idx].base = NULL;
+		evlist->mmap[idx].refcnt = 0;
	}
}

static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
			       struct mmap_params *mp, int fd)
{
+	/*
+	 * The last one will be done at perf_evlist__mmap_consume(), so that we
+	 * make sure we don't prevent tools from consuming every last event in
+	 * the ring buffer.
+	 *
+	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
+	 * anymore, but the last events for it are still in the ring buffer,
+	 * waiting to be consumed.
+	 *
+	 * Tools can choose to ignore this at their own discretion, but the
+	 * evlist layer can't just drop it when filtering events in
+	 * perf_evlist__filter_pollfd().
+	 */
+	evlist->mmap[idx].refcnt = 2;
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mp->mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, mp->prot,
				      MAP_SHARED, fd, 0);
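The mmap call being set up here has a fixed shape: the length is one metadata page plus a power-of-two number of data pages, mapped MAP_SHARED on the event fd at offset 0. A minimal standalone sketch of that shape against a raw perf_event_open(2) fd; the cycles event and page counts are arbitrary choices, 4 KiB pages are assumed, and the program may need perf_event_paranoid relaxed to run:

#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	size_t len = (1 + 8) * 4096;	/* metadata page + 8 data pages */
	void *base;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	/* perf_event_open() has no glibc wrapper; it is always syscall(2). */
	fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* Same shape as the call above: MAP_SHARED on the fd, offset 0. */
	base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (base == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	printf("ring mapped at %p\n", base);
	munmap(base, len);
	close(fd);
	return 0;
}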

	} else {
		if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
			return -1;
+
+		perf_evlist__mmap_get(evlist, idx);
	}

-	if (perf_evlist__add_pollfd(evlist, fd) < 0)
+	if (perf_evlist__add_pollfd(evlist, fd) < 0) {
+		perf_evlist__mmap_put(evlist, idx);
		return -1;
+	}
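The new braces above widen the error path so that a failed pollfd registration gives back a reference instead of leaking it. The same idiom in miniature, reusing the hypothetical slot_get()/slot_put() helpers from the refcounting sketch further up; watch_fd() is a made-up stand-in for perf_evlist__add_pollfd():

/* Made-up stand-in for perf_evlist__add_pollfd(); always succeeds here. */
static int watch_fd(int fd) { (void)fd; return 0; }

static int share_ring(struct mmap_slot *s, int fd)
{
	slot_get(s);		/* this fd outputs into an existing ring */

	if (watch_fd(fd) < 0) {
		slot_put(s);	/* roll back so no reference is leaked */
		return -1;
	}

	return 0;
}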

		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
			return -1;