 	evsel->hists.stats.total_period += cost;
 	hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
+	if (!he->filtered)
+		evsel->hists.stats.nr_non_filtered_samples++;
 	err = hist_entry__append_callchain(he, sample);
 out:
 	return err;
 			evsel->hists.stats.total_period += 1;
 			hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
+			if (!he->filtered)
+				evsel->hists.stats.nr_non_filtered_samples++;
 		} else
 			goto out;
 	}
 		err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
 	evsel->hists.stats.total_period += sample->period;
+	if (!he->filtered)
+		evsel->hists.stats.nr_non_filtered_samples++;
 	hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
 out:
 	return err;
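/*
 * The three hunks above repeat the same accounting: count the
 * PERF_RECORD_SAMPLE event and, when the new hist entry survived the
 * active filters, also bump the new nr_non_filtered_samples counter.
 * A minimal sketch of a helper that could fold that pattern into one
 * call is shown below; the helper name is hypothetical and not part
 * of this patch, and it assumes the events_stats fields introduced
 * further down.
 */
static void hists__account_sample(struct hists *hists, bool filtered)
{
	/* every sample counts toward the overall event statistics */
	hists__inc_nr_events(hists, PERF_RECORD_SAMPLE);

	/* only samples whose entry passed the current filters count here */
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}

/* e.g. the call sites above would then read: */
/*	hists__account_sample(&evsel->hists, he->filtered); */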
 	next = rb_first(root);
 	hists->entries = RB_ROOT;
-	hists->nr_entries = 0;
-	hists->stats.total_period = 0;
+	hists->nr_entries = hists->nr_non_filtered_entries = 0;
+	hists->stats.total_period = hists->stats.total_non_filtered_period = 0;
 	hists__reset_col_len(hists);
 	while (next) {
 		return;
 	++hists->nr_entries;
-	if (h->ms.unfolded)
+	++hists->nr_non_filtered_entries;
+	if (h->ms.unfolded) {
 		hists->nr_entries += h->nr_rows;
+		hists->nr_non_filtered_entries += h->nr_rows;
+	}
 	h->row_offset = 0;
 	hists->stats.total_period += h->stat.period;
+	hists->stats.total_non_filtered_period += h->stat.period;
 	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;
+	hists->stats.nr_non_filtered_samples += h->stat.nr_events;
 	hists__calc_col_len(hists, h);
 }
 	struct rb_node *nd;
 	hists->nr_entries = hists->stats.total_period = 0;
+	hists->nr_non_filtered_entries = hists->stats.total_non_filtered_period = 0;
 	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
+	hists->stats.nr_non_filtered_samples = 0;
 	hists__reset_col_len(hists);
 	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
 	struct rb_node *nd;
 	hists->nr_entries = hists->stats.total_period = 0;
+	hists->nr_non_filtered_entries = hists->stats.total_non_filtered_period = 0;
 	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
+	hists->stats.nr_non_filtered_samples = 0;
 	hists__reset_col_len(hists);
 	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
 	struct rb_node *nd;
 	hists->nr_entries = hists->stats.total_period = 0;
+	hists->nr_non_filtered_entries = hists->stats.total_non_filtered_period = 0;
 	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
+	hists->stats.nr_non_filtered_samples = 0;
 	hists__reset_col_len(hists);
 	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
  */
 struct events_stats {
 	u64 total_period;
+	u64 total_non_filtered_period;
 	u64 total_lost;
 	u64 total_invalid_chains;
 	u32 nr_events[PERF_RECORD_HEADER_MAX];
+	u32 nr_non_filtered_samples;
 	u32 nr_lost_warned;
 	u32 nr_unknown_events;
 	u32 nr_invalid_chains;
 	struct rb_root		entries;
 	struct rb_root		entries_collapsed;
 	u64			nr_entries;
+	u64			nr_non_filtered_entries;
 	const struct thread	*thread_filter;
 	const struct dso	*dso_filter;
 	const char		*uid_filter_str;
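/*
 * A minimal sketch of how the new fields could be consumed, assuming
 * only the counters added above: once entries are filtered out, a
 * front end can compute an entry's share of the non-filtered totals
 * instead of the overall totals.  The function below is illustrative
 * and not part of this patch.
 */
static double hist_entry__period_percent(struct hist_entry *he,
					 struct hists *hists,
					 bool relative_to_filtered)
{
	u64 total = relative_to_filtered ?
		hists->stats.total_non_filtered_period :
		hists->stats.total_period;

	/* guard against an empty (or fully filtered) hists */
	return total ? 100.0 * he->stat.period / total : 0.0;
}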