 	if (ctx->task && cpuctx->task_ctx != ctx)
 		return;
 
+	spin_lock(&ctx->lock);
 	update_context_time(ctx);
 	update_event_times(event);
+	spin_unlock(&ctx->lock);
+
 	event->pmu->read(event);
 }
@@ ... @@ static u64 perf_event_read(struct perf_event *event)
 		smp_call_function_single(event->oncpu,
 					 __perf_event_read, event, 1);
 	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
+		struct perf_event_context *ctx = event->ctx;
+		unsigned long flags;
+
+		spin_lock_irqsave(&ctx->lock, flags);
+		update_context_time(ctx);
 		update_event_times(event);
+		spin_unlock_irqrestore(&ctx->lock, flags);
 	}
 
 	return atomic64_read(&event->count);
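
For reference, the rule this patch enforces: ctx->time and the per-event
times are otherwise updated under ctx->lock, so both read paths must take
the lock too. __perf_event_read() runs in hardirq context (it is the
smp_call_function_single() handler, and the local-CPU fast path also runs
it with IRQs disabled), so a plain spin_lock() is sufficient there;
perf_event_read() runs in process context, where the same lock can also
be taken from interrupt context on this CPU, so it needs the _irqsave
variant. A minimal sketch of the pattern in kernel-style C; the two
function names are illustrative, not part of the patch:

	#include <linux/spinlock.h>

	/*
	 * Called from the IPI handler: interrupts are already disabled,
	 * so the cheap non-saving lock variant is enough.
	 */
	static void read_from_hardirq(struct perf_event_context *ctx)
	{
		spin_lock(&ctx->lock);
		/* ... update ctx->time and event times ... */
		spin_unlock(&ctx->lock);
	}

	/*
	 * Called from process context (the read() syscall path): IRQs
	 * must be disabled while ctx->lock is held, otherwise an
	 * interrupt taking the same lock on this CPU would deadlock.
	 */
	static void read_from_process_ctx(struct perf_event_context *ctx)
	{
		unsigned long flags;

		spin_lock_irqsave(&ctx->lock, flags);
		/* ... update ctx->time and event times ... */
		spin_unlock_irqrestore(&ctx->lock, flags);
	}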