From: Arnaldo Carvalho de Melo
Date: Mon, 8 Sep 2014 14:27:49 +0000 (-0300)
Subject: perf evlist: Unmap when all refcounts to fd are gone and events drained
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=e4b356b56cfe77b800a9bc2e6efefa6a069b8a78;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git

perf evlist: Unmap when all refcounts to fd are gone and events drained

As noticed by receiving a POLLHUP for all its pollfd entries.

That will remove the refcount taken in perf_evlist__mmap_per_evsel(),
and when all events are consumed via perf_evlist__mmap_read() +
perf_evlist__mmap_consume(), the ring buffer will be unmap'ed.

Thanks to Jiri Olsa for pointing out that we must wait till all events
are consumed, not being ok to unmmap just when receiving all the
POLLHUPs.

Cc: Adrian Hunter
Cc: Borislav Petkov
Cc: Corey Ashford
Cc: David Ahern
Cc: Frederic Weisbecker
Cc: Ingo Molnar
Cc: Jean Pihet
Cc: Jiri Olsa
Cc: Namhyung Kim
Cc: Paul Mackerras
Cc: Peter Zijlstra
Link: http://lkml.kernel.org/n/tip-t10w1xk4myp7ca7m9fvip6a0@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo
---

diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 61d18dc83e8e..3cebc9a8d52e 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -25,11 +25,12 @@
 #include <linux/bitops.h>
 #include <linux/hash.h>
 
+static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx);
+static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx);
+
 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
 #define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
 
-static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx);
-
 void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
 		       struct thread_map *threads)
 {
@@ -426,16 +427,38 @@ int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
 	return 0;
 }
 
+static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, int idx)
+{
+	int pos = fdarray__add(&evlist->pollfd, fd, POLLIN | POLLERR | POLLHUP);
+	/*
+	 * Save the idx so that when we filter out fds POLLHUP'ed we can
+	 * close the associated evlist->mmap[] entry.
+	 */
+	if (pos >= 0) {
+		evlist->pollfd.priv[pos].idx = idx;
+
+		fcntl(fd, F_SETFL, O_NONBLOCK);
+	}
+
+	return pos;
+}
+
 int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
 {
-	fcntl(fd, F_SETFL, O_NONBLOCK);
+	return __perf_evlist__add_pollfd(evlist, fd, -1);
+}
+
+static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd)
+{
+	struct perf_evlist *evlist = container_of(fda, struct perf_evlist, pollfd);
 
-	return fdarray__add(&evlist->pollfd, fd, POLLIN | POLLERR | POLLHUP);
+	perf_evlist__mmap_put(evlist, fda->priv[fd].idx);
 }
 
 int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
 {
-	return fdarray__filter(&evlist->pollfd, revents_and_mask, NULL);
+	return fdarray__filter(&evlist->pollfd, revents_and_mask,
+			       perf_evlist__munmap_filtered);
 }
 
 int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
@@ -777,7 +800,7 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
 		perf_evlist__mmap_get(evlist, idx);
 	}
 
-	if (perf_evlist__add_pollfd(evlist, fd) < 0) {
+	if (__perf_evlist__add_pollfd(evlist, fd, idx) < 0) {
 		perf_evlist__mmap_put(evlist, idx);
 		return -1;
 	}