From 4beb31f3657348a8b702dd014d01c520e522012f Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Tue, 23 Jul 2013 02:31:02 +0200
Subject: [PATCH] perf: Split the per-cpu accounting part of the event accounting code

This way we can use the per-cpu handling separately.
This is going to be used to fix the event migration code accounting.

Original-patch-by: Peter Zijlstra
Signed-off-by: Frederic Weisbecker
Cc: Jiri Olsa
Cc: Namhyung Kim
Cc: Arnaldo Carvalho de Melo
Cc: Stephane Eranian
Signed-off-by: Peter Zijlstra
Link: http://lkml.kernel.org/r/1374539466-4799-5-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar
---
 kernel/events/core.c | 87 ++++++++++++++++++++++++++++----------------
 1 file changed, 55 insertions(+), 32 deletions(-)

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 158fd5789e58..3a4b73aebc42 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3128,6 +3128,40 @@ static void free_event_rcu(struct rcu_head *head)
 static void ring_buffer_put(struct ring_buffer *rb);
 static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
 
+static void unaccount_event_cpu(struct perf_event *event, int cpu)
+{
+	if (event->parent)
+		return;
+
+	if (has_branch_stack(event)) {
+		if (!(event->attach_state & PERF_ATTACH_TASK))
+			atomic_dec(&per_cpu(perf_branch_stack_events, cpu));
+	}
+	if (is_cgroup_event(event))
+		atomic_dec(&per_cpu(perf_cgroup_events, cpu));
+}
+
+static void unaccount_event(struct perf_event *event)
+{
+	if (event->parent)
+		return;
+
+	if (event->attach_state & PERF_ATTACH_TASK)
+		static_key_slow_dec_deferred(&perf_sched_events);
+	if (event->attr.mmap || event->attr.mmap_data)
+		atomic_dec(&nr_mmap_events);
+	if (event->attr.comm)
+		atomic_dec(&nr_comm_events);
+	if (event->attr.task)
+		atomic_dec(&nr_task_events);
+	if (is_cgroup_event(event))
+		static_key_slow_dec_deferred(&perf_sched_events);
+	if (has_branch_stack(event))
+		static_key_slow_dec_deferred(&perf_sched_events);
+
+	unaccount_event_cpu(event, event->cpu);
+}
+
 static void __free_event(struct perf_event *event)
 {
 	if (!event->parent) {
@@ -3147,29 +3181,7 @@ static void free_event(struct perf_event *event)
 {
 	irq_work_sync(&event->pending);
 
-	if (!event->parent) {
-		if (event->attach_state & PERF_ATTACH_TASK)
-			static_key_slow_dec_deferred(&perf_sched_events);
-		if (event->attr.mmap || event->attr.mmap_data)
-			atomic_dec(&nr_mmap_events);
-		if (event->attr.comm)
-			atomic_dec(&nr_comm_events);
-		if (event->attr.task)
-			atomic_dec(&nr_task_events);
-		if (is_cgroup_event(event)) {
-			atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
-			static_key_slow_dec_deferred(&perf_sched_events);
-		}
-
-		if (has_branch_stack(event)) {
-			static_key_slow_dec_deferred(&perf_sched_events);
-			/* is system-wide event */
-			if (!(event->attach_state & PERF_ATTACH_TASK)) {
-				atomic_dec(&per_cpu(perf_branch_stack_events,
-						    event->cpu));
-			}
-		}
-	}
+	unaccount_event(event);
 
 	if (event->rb) {
 		struct ring_buffer *rb;
@@ -6451,8 +6463,24 @@ unlock:
 	return pmu;
 }
 
+static void account_event_cpu(struct perf_event *event, int cpu)
+{
+	if (event->parent)
+		return;
+
+	if (has_branch_stack(event)) {
+		if (!(event->attach_state & PERF_ATTACH_TASK))
+			atomic_inc(&per_cpu(perf_branch_stack_events, cpu));
+	}
+	if (is_cgroup_event(event))
+		atomic_inc(&per_cpu(perf_cgroup_events, cpu));
+}
+
 static void account_event(struct perf_event *event)
 {
+	if (event->parent)
+		return;
+
 	if (event->attach_state & PERF_ATTACH_TASK)
 		static_key_slow_inc(&perf_sched_events.key);
 	if (event->attr.mmap || event->attr.mmap_data)
@@ -6461,17 +6489,12 @@ static void account_event(struct perf_event *event)
 		atomic_inc(&nr_comm_events);
 	if (event->attr.task)
 		atomic_inc(&nr_task_events);
-	if (has_branch_stack(event)) {
+	if (has_branch_stack(event))
 		static_key_slow_inc(&perf_sched_events.key);
-		if (!(event->attach_state & PERF_ATTACH_TASK))
-			atomic_inc(&per_cpu(perf_branch_stack_events,
-					    event->cpu));
-	}
-
-	if (is_cgroup_event(event)) {
-		atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
+	if (is_cgroup_event(event))
 		static_key_slow_inc(&perf_sched_events.key);
-	}
+
+	account_event_cpu(event, event->cpu);
 }
 
 /*
-- 
2.20.1
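
A note on how these helpers are meant to be used: the per-cpu half of the
accounting is the only part that must move when an event changes CPUs, while
the global counters (nr_mmap_events and friends) and the perf_sched_events
static key stay put because the event itself keeps existing. Below is a
minimal sketch of the migration fix the changelog announces; the function
name migrate_event_cpu_accounting() is made up for illustration, and the
actual fix presumably wires these calls into perf_pmu_migrate_context():

/*
 * Sketch only, not part of this patch: move the per-cpu portion of an
 * event's accounting from src_cpu to dst_cpu.
 */
static void migrate_event_cpu_accounting(struct perf_event *event,
					 int src_cpu, int dst_cpu)
{
	/* release perf_cgroup_events/perf_branch_stack_events on the old CPU */
	unaccount_event_cpu(event, src_cpu);

	/* ... the event is re-installed in the destination context here ... */

	/* then charge the same per-cpu counters on the new CPU */
	account_event_cpu(event, dst_cpu);
}

Because both helpers bail out early for child events (event->parent set) and
take the CPU as an explicit parameter instead of reading event->cpu, the
migration path can account against either CPU without duplicating the
branch-stack and cgroup special cases.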