spin_lock_irqsave(&ctx->lock, flags);
+ update_context_time(ctx);
+
/*
* If the counter is on, turn it off.
* If it is in error state, leave it in error state.
if (likely(!cpuctx->task_ctx))
return;
+ update_context_time(ctx);
+
regs = task_pt_regs(task);
perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs);
__perf_counter_sched_out(ctx, cpuctx);
u64 prev, now;
s64 delta;
- update_context_time(counter->ctx);
now = counter->ctx->time;
prev = atomic64_xchg(&counter->hw.prev_count, now);
struct hw_perf_counter *hwc = &counter->hw;
u64 now;
- update_context_time(counter->ctx);
now = counter->ctx->time;
atomic64_set(&hwc->prev_count, now);
/*
 * task_clock_perf_counter_read - refresh a task-clock software counter's value.
 *
 * NOTE(review): this span is a unified-diff hunk, not plain source; the
 * leading '+' marks the line added by the patch.  The added call advances
 * counter->ctx's time bookkeeping before the value update, so the update
 * helper reads a fresh ctx->time — consistent with the sibling hunks in
 * this patch that move update_context_time() out of the update/enable
 * helpers and into their callers.  Verify against the full file.
 */
static void task_clock_perf_counter_read(struct perf_counter *counter)
{
+ update_context_time(counter->ctx);
task_clock_perf_counter_update(counter);
}