6 #include <linux/bitops.h>
7 #include <linux/hash.h>
9 void perf_evlist__init(struct perf_evlist
*evlist
)
13 for (i
= 0; i
< PERF_EVLIST__HLIST_SIZE
; ++i
)
14 INIT_HLIST_HEAD(&evlist
->heads
[i
]);
15 INIT_LIST_HEAD(&evlist
->entries
);
18 struct perf_evlist
*perf_evlist__new(void)
20 struct perf_evlist
*evlist
= zalloc(sizeof(*evlist
));
23 perf_evlist__init(evlist
);
28 static void perf_evlist__purge(struct perf_evlist
*evlist
)
30 struct perf_evsel
*pos
, *n
;
32 list_for_each_entry_safe(pos
, n
, &evlist
->entries
, node
) {
33 list_del_init(&pos
->node
);
34 perf_evsel__delete(pos
);
37 evlist
->nr_entries
= 0;
40 void perf_evlist__exit(struct perf_evlist
*evlist
)
45 evlist
->pollfd
= NULL
;
/*
 * Full teardown: destroy every evsel, release the auxiliary arrays,
 * then free the evlist itself.  Counterpart of perf_evlist__new().
 */
void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}
55 void perf_evlist__add(struct perf_evlist
*evlist
, struct perf_evsel
*entry
)
57 list_add_tail(&entry
->node
, &evlist
->entries
);
61 int perf_evlist__add_default(struct perf_evlist
*evlist
)
63 struct perf_event_attr attr
= {
64 .type
= PERF_TYPE_HARDWARE
,
65 .config
= PERF_COUNT_HW_CPU_CYCLES
,
67 struct perf_evsel
*evsel
= perf_evsel__new(&attr
, 0);
72 perf_evlist__add(evlist
, evsel
);
76 int perf_evlist__alloc_pollfd(struct perf_evlist
*evlist
, int ncpus
, int nthreads
)
78 int nfds
= ncpus
* nthreads
* evlist
->nr_entries
;
79 evlist
->pollfd
= malloc(sizeof(struct pollfd
) * nfds
);
80 return evlist
->pollfd
!= NULL
? 0 : -ENOMEM
;
83 void perf_evlist__add_pollfd(struct perf_evlist
*evlist
, int fd
)
85 fcntl(fd
, F_SETFL
, O_NONBLOCK
);
86 evlist
->pollfd
[evlist
->nr_fds
].fd
= fd
;
87 evlist
->pollfd
[evlist
->nr_fds
].events
= POLLIN
;
91 struct perf_evsel
*perf_evlist__id2evsel(struct perf_evlist
*evlist
, u64 id
)
93 struct hlist_head
*head
;
94 struct hlist_node
*pos
;
95 struct perf_sample_id
*sid
;
98 if (evlist
->nr_entries
== 1)
99 return list_entry(evlist
->entries
.next
, struct perf_evsel
, node
);
101 hash
= hash_64(id
, PERF_EVLIST__HLIST_BITS
);
102 head
= &evlist
->heads
[hash
];
104 hlist_for_each_entry(sid
, pos
, head
, node
)
110 union perf_event
*perf_evlist__read_on_cpu(struct perf_evlist
*evlist
, int cpu
)
112 /* XXX Move this to perf.c, making it generally available */
113 unsigned int page_size
= sysconf(_SC_PAGE_SIZE
);
114 struct perf_mmap
*md
= &evlist
->mmap
[cpu
];
115 unsigned int head
= perf_mmap__read_head(md
);
116 unsigned int old
= md
->prev
;
117 unsigned char *data
= md
->base
+ page_size
;
118 union perf_event
*event
= NULL
;
120 if (evlist
->overwrite
) {
122 * If we're further behind than half the buffer, there's a chance
123 * the writer will bite our tail and mess up the samples under us.
125 * If we somehow ended up ahead of the head, we got messed up.
127 * In either case, truncate and restart at head.
129 int diff
= head
- old
;
130 if (diff
> md
->mask
/ 2 || diff
< 0) {
131 fprintf(stderr
, "WARNING: failed to keep up with mmap data.\n");
134 * head points to a known good entry, start there.
143 event
= (union perf_event
*)&data
[old
& md
->mask
];
144 size
= event
->header
.size
;
147 * Event straddles the mmap boundary -- header should always
148 * be inside due to u64 alignment of output.
150 if ((old
& md
->mask
) + size
!= ((old
+ size
) & md
->mask
)) {
151 unsigned int offset
= old
;
152 unsigned int len
= min(sizeof(*event
), size
), cpy
;
153 void *dst
= &evlist
->event_copy
;
156 cpy
= min(md
->mask
+ 1 - (offset
& md
->mask
), len
);
157 memcpy(dst
, &data
[offset
& md
->mask
], cpy
);
163 event
= &evlist
->event_copy
;
171 if (!evlist
->overwrite
)
172 perf_mmap__write_tail(md
, old
);