perf: Fix time locking
author Peter Zijlstra <a.p.zijlstra@chello.nl>
Fri, 20 Nov 2009 21:19:54 +0000 (22:19 +0100)
committer Ingo Molnar <mingo@elte.hu>
Sat, 21 Nov 2009 13:11:39 +0000 (14:11 +0100)
Most sites that update ctx->time and the event times already do so
under ctx->lock; make sure they all do.

This was made possible by removing the __perf_event_read() call
from __perf_event_sync_stat(), which already runs with this lock
held.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <20091120212509.102316434@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
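
[Editorial note] The rule the patch enforces is that every path updating
ctx->time or the per-event times must hold ctx->lock. The sketch below is
a rough user-space analogue of that invariant only, not kernel code: a
pthread mutex stands in for the kernel spinlock, and the structures and
helper names are simplified stand-ins for perf_event_context, perf_event,
update_context_time() and update_event_times().

/*
 * Illustrative user-space analogue only -- not kernel code.  A pthread
 * mutex plays the role of ctx->lock; the structs are simplified
 * stand-ins for perf_event_context and perf_event.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct context {
	pthread_mutex_t lock;	/* plays the role of ctx->lock */
	uint64_t time;		/* plays the role of ctx->time */
};

struct event {
	struct context *ctx;
	uint64_t total_time;	/* accumulated event time */
};

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Caller must hold ctx->lock -- the invariant the patch enforces. */
static void update_context_time_locked(struct context *ctx)
{
	ctx->time = now_ns();
}

/* Caller must hold ctx->lock. */
static void update_event_times_locked(struct event *event)
{
	event->total_time = event->ctx->time;
}

/* Reader path: both time updates happen inside one locked section. */
static uint64_t event_read(struct event *event)
{
	struct context *ctx = event->ctx;
	uint64_t val;

	pthread_mutex_lock(&ctx->lock);
	update_context_time_locked(ctx);
	update_event_times_locked(event);
	val = event->total_time;
	pthread_mutex_unlock(&ctx->lock);

	return val;
}

int main(void)
{
	struct context ctx = { .time = 0 };
	struct event event = { .ctx = &ctx, .total_time = 0 };

	pthread_mutex_init(&ctx.lock, NULL);
	printf("event time: %llu ns\n",
	       (unsigned long long)event_read(&event));
	pthread_mutex_destroy(&ctx.lock);

	return 0;
}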
kernel/perf_event.c

index 028619dd6d0ea4615f0363bda96986302cd1e5af..fdfae888a67ce9fe6a9b9df9010f47733ec382e9 100644
@@ -1526,8 +1526,11 @@ static void __perf_event_read(void *info)
        if (ctx->task && cpuctx->task_ctx != ctx)
                return;
 
+       spin_lock(&ctx->lock);
        update_context_time(ctx);
        update_event_times(event);
+       spin_unlock(&ctx->lock);
+
        event->pmu->read(event);
 }
 
@@ -1541,7 +1544,13 @@ static u64 perf_event_read(struct perf_event *event)
                smp_call_function_single(event->oncpu,
                                         __perf_event_read, event, 1);
        } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
+               struct perf_event_context *ctx = event->ctx;
+               unsigned long flags;
+
+               spin_lock_irqsave(&ctx->lock, flags);
+               update_context_time(ctx);
                update_event_times(event);
+               spin_unlock_irqrestore(&ctx->lock, flags);
        }
 
        return atomic64_read(&event->count);