From: Frederic Weisbecker
Date: Sun, 17 Jan 2010 11:56:05 +0000 (+0100)
Subject: perf: Better order flexible and pinned scheduling
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=329c0e012b99fa2325a0be205c052e4aba690f16;p=GitHub%2FLineageOS%2Fandroid_kernel_motorola_exynos9610.git

perf: Better order flexible and pinned scheduling

When a task gets scheduled in, we don't touch the cpu bound events,
so the priority order becomes:

	cpu pinned, cpu flexible, task pinned, task flexible.

So schedule out the cpu flexible groups when a new task context gets
in, and schedule the groups back in the correct order:

	task pinned, cpu flexible, task flexible.

Cpu pinned groups don't need to be touched at this time.

Signed-off-by: Frederic Weisbecker
Cc: Peter Zijlstra
Cc: Paul Mackerras
Cc: Ingo Molnar
Cc: Arnaldo Carvalho de Melo
---

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index a90ae694cbc1..edc46b92b508 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1362,6 +1362,14 @@ ctx_sched_in(struct perf_event_context *ctx,
 	raw_spin_unlock(&ctx->lock);
 }
 
+static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
+			     enum event_type_t event_type)
+{
+	struct perf_event_context *ctx = &cpuctx->ctx;
+
+	ctx_sched_in(ctx, cpuctx, event_type);
+}
+
 static void task_ctx_sched_in(struct task_struct *task,
 			      enum event_type_t event_type)
 {
@@ -1388,15 +1396,27 @@ static void task_ctx_sched_in(struct task_struct *task,
  */
 void perf_event_task_sched_in(struct task_struct *task)
 {
-	task_ctx_sched_in(task, EVENT_ALL);
-}
+	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+	struct perf_event_context *ctx = task->perf_event_ctxp;
 
-static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
-			     enum event_type_t event_type)
-{
-	struct perf_event_context *ctx = &cpuctx->ctx;
+	if (likely(!ctx))
+		return;
 
-	ctx_sched_in(ctx, cpuctx, event_type);
+	if (cpuctx->task_ctx == ctx)
+		return;
+
+	/*
+	 * We want to keep the following priority order:
+	 * cpu pinned (that don't need to move), task pinned,
+	 * cpu flexible, task flexible.
+	 */
+	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
+
+	ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
+	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
+	ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
+
+	cpuctx->task_ctx = ctx;
 }
 
 #define MAX_INTERRUPTS (~0ULL)
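
As a rough illustration of the ordering this patch establishes, here is a
toy user-space C sketch (not kernel code; the enums, name tables and helper
functions are invented for this example) that prints the schedule-out /
schedule-in sequence performed when a new task context comes in:

	#include <stdio.h>

	/* Toy model of the two event scopes and two group types. */
	enum scope { CPU_CTX, TASK_CTX };
	enum type  { PINNED, FLEXIBLE };

	static const char *scope_name[] = { "cpu", "task" };
	static const char *type_name[]  = { "pinned", "flexible" };

	static void sched_out(enum scope s, enum type t)
	{
		printf("sched out: %s %s\n", scope_name[s], type_name[t]);
	}

	static void sched_in(enum scope s, enum type t)
	{
		printf("sched in:  %s %s\n", scope_name[s], type_name[t]);
	}

	int main(void)
	{
		/* cpu pinned groups are left alone: they keep top priority */
		sched_out(CPU_CTX, FLEXIBLE);	/* make room below cpu pinned */

		sched_in(TASK_CTX, PINNED);	/* task pinned beats cpu flexible */
		sched_in(CPU_CTX, FLEXIBLE);	/* cpu flexible comes back */
		sched_in(TASK_CTX, FLEXIBLE);	/* task flexible goes last */

		return 0;
	}

The printed sequence mirrors the call order in the new
perf_event_task_sched_in(): cpu flexible groups go out first, then the
task pinned, cpu flexible and task flexible groups come back in, so cpu
pinned groups keep top priority without being touched.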