pmu->pmu_enable(pmu);
}
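+/*
+ * Start the per-cpu rotation hrtimer if it is not already running. The
+ * timer handler (perf_event_context_tick) adjusts event frequencies and,
+ * when needed, rotates the flexible event groups.
+ */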
+static void perf_pmu_rotate_start(void)
+{
+ struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+
+ if (hrtimer_active(&cpuctx->timer))
+ return;
+
+ __hrtimer_start_range_ns(&cpuctx->timer,
+ ns_to_ktime(cpuctx->timer_interval), 0,
+ HRTIMER_MODE_REL_PINNED, 0);
+}
+
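+/*
+ * Cancel this CPU's rotation hrtimer; used when tearing down the per-cpu
+ * context.
+ */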
+static void perf_pmu_rotate_stop(void)
+{
+ struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+
+ hrtimer_cancel(&cpuctx->timer);
+}
+
static void get_ctx(struct perf_event_context *ctx)
{
WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}
list_add_rcu(&event->event_entry, &ctx->event_list);
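+ /* First event in this context: make sure the rotation timer is running. */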
+ if (!ctx->nr_events)
+ perf_pmu_rotate_start();
ctx->nr_events++;
if (event->attr.inherit_stat)
ctx->nr_stat++;
ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
cpuctx->task_ctx = ctx;
+
+ /*
+ * Since these rotations are per-cpu, we need to ensure the
+ * cpu-context we got scheduled on is actually rotating.
+ */
+ perf_pmu_rotate_start();
}
#define MAX_INTERRUPTS (~0ULL)
}
}
-static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
+static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
{
struct perf_event *event;
struct hw_perf_event *hwc;
hwc->freq_count_stamp = now;
if (delta > 0)
- perf_adjust_period(event, TICK_NSEC, delta);
+ perf_adjust_period(event, period, delta);
}
raw_spin_unlock(&ctx->lock);
}
raw_spin_unlock(&ctx->lock);
}
-void perf_event_task_tick(struct task_struct *curr)
+/*
+ * Cannot race with perf_pmu_rotate_start() because this runs in hardirq
+ * context, and perf_pmu_rotate_start() is called with irqs disabled (both
+ * are cpu affine, so there are no SMP races).
+ */
+static enum hrtimer_restart perf_event_context_tick(struct hrtimer *timer)
{
+ enum hrtimer_restart restart = HRTIMER_NORESTART;
struct perf_cpu_context *cpuctx;
struct perf_event_context *ctx;
int rotate = 0;
- if (!atomic_read(&nr_events))
- return;
+ cpuctx = container_of(timer, struct perf_cpu_context, timer);
- cpuctx = &__get_cpu_var(perf_cpu_context);
- if (cpuctx->ctx.nr_events &&
- cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
- rotate = 1;
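+ /* Keep the timer running only while some context still has events. */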
+ if (cpuctx->ctx.nr_events) {
+ restart = HRTIMER_RESTART;
+ if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
+ rotate = 1;
+ }
- ctx = curr->perf_event_ctxp;
- if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active)
- rotate = 1;
+ ctx = current->perf_event_ctxp;
+ if (ctx && ctx->nr_events) {
+ restart = HRTIMER_RESTART;
+ if (ctx->nr_events != ctx->nr_active)
+ rotate = 1;
+ }
- perf_ctx_adjust_freq(&cpuctx->ctx);
+ perf_ctx_adjust_freq(&cpuctx->ctx, cpuctx->timer_interval);
if (ctx)
- perf_ctx_adjust_freq(ctx);
+ perf_ctx_adjust_freq(ctx, cpuctx->timer_interval);
if (!rotate)
- return;
+ goto done;
cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
if (ctx)
cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
if (ctx)
- task_ctx_sched_in(curr, EVENT_FLEXIBLE);
+ task_ctx_sched_in(current, EVENT_FLEXIBLE);
+
+done:
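+ /* Advance the expiry by one interval; 'restart' decides whether we re-arm. */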
+ hrtimer_forward_now(timer, ns_to_ktime(cpuctx->timer_interval));
+
+ return restart;
}
static int event_enable_on_exec(struct perf_event *event,
}
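+ /* Use REL_PINNED so the software-event hrtimer stays on this CPU. */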
__hrtimer_start_range_ns(&hwc->hrtimer,
ns_to_ktime(period), 0,
- HRTIMER_MODE_REL, 0);
+ HRTIMER_MODE_REL_PINNED, 0);
}
}
cpuctx = &per_cpu(perf_cpu_context, cpu);
__perf_event_init_context(&cpuctx->ctx, NULL);
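+ /* Rotation runs from a per-cpu hrtimer; default the interval to one tick. */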
+ cpuctx->timer_interval = TICK_NSEC;
+ hrtimer_init(&cpuctx->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ cpuctx->timer.function = perf_event_context_tick;
}
}
struct perf_event_context *ctx = &cpuctx->ctx;
struct perf_event *event, *tmp;
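+ /* Stop the rotation timer before removing the remaining events. */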
+ perf_pmu_rotate_stop();
+
list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
__perf_event_remove_from_context(event);
list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)