	return val;
}

-static u64 perf_event_read(struct perf_event *event)
+static void perf_event_read(struct perf_event *event)
{
	/*
	 * If event is enabled and currently active on a CPU, update the
	 * value in the event structure:
	 */
@@ ... @@ static u64 perf_event_read(struct perf_event *event)
		update_event_times(event);
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}
-
-	return perf_event_count(event);
}
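With its return value gone, perf_event_read() only refreshes event->count: it IPIs the owning CPU via smp_call_function_single() while the event is active, and updates the event times under ctx->lock while it is inactive. Callers now fetch the value in a separate step through perf_event_count(). For orientation, here is a sketch of what that helper computes; this is an approximation of the definition in kernel/events/core.c, which may also defer to a pmu->count callback:

/*
 * Sketch only (not part of this patch): the value perf_event_count()
 * hands back to the callers below -- the event's own count plus the
 * counts accumulated from already-exited children.
 */
static u64 perf_event_count(struct perf_event *event)
{
	return local64_read(&event->count) +
	       atomic64_read(&event->child_count);
}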
@@ ... @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
	*running = 0;

	mutex_lock(&event->child_mutex);
- total += perf_event_read(event);
+
+ perf_event_read(event);
+ total += perf_event_count(event);
+
*enabled += event->total_time_enabled +
atomic64_read(&event->child_total_time_enabled);
*running += event->total_time_running +
atomic64_read(&event->child_total_time_running);
list_for_each_entry(child, &event->child_list, child_list) {
- total += perf_event_read(child);
+ perf_event_read(child);
+ total += perf_event_count(child);
*enabled += child->total_time_enabled;
*running += child->total_time_running;
}
@@ ... @@
static void _perf_event_reset(struct perf_event *event)
{
- (void)perf_event_read(event);
+ perf_event_read(event);
local64_set(&event->count, 0);
perf_event_update_userpage(event);
}
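Taken together, the calling convention after this patch is refresh-then-read: perf_event_read() brings event->count up to date, then perf_event_count() collects the total. Below is a minimal illustration of the pattern, modeled on the perf_event_read_value() hunk above; read_event_total() is a hypothetical helper, and the child_mutex locking plus the enabled/running time accounting are elided:

/* Hypothetical helper, for illustration only. */
static u64 read_event_total(struct perf_event *event)
{
	struct perf_event *child;
	u64 total = 0;

	perf_event_read(event);			/* refresh event->count */
	total += perf_event_count(event);	/* event + reaped children */

	list_for_each_entry(child, &event->child_list, child_list) {
		perf_event_read(child);
		total += perf_event_count(child);
	}

	return total;
}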