perf/x86/intel: Clean up intel_commit_scheduling() placement
author: Peter Zijlstra <peterz@infradead.org>
Thu, 21 May 2015 08:57:32 +0000 (10:57 +0200)
committer: Ingo Molnar <mingo@kernel.org>
Wed, 27 May 2015 07:17:44 +0000 (09:17 +0200)
Move the code of intel_commit_scheduling() to the right place, which is
in between start() and stop().

No change in functionality.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/kernel/cpu/perf_event.h
arch/x86/kernel/cpu/perf_event_intel.c

index ef78516850fb0e3ef653ff97bad5152b46389787..e5609522255c5702b6e6730af3193539c110467c 100644 (file)
@@ -527,10 +527,10 @@ struct x86_pmu {
        void            (*put_event_constraints)(struct cpu_hw_events *cpuc,
                                                 struct perf_event *event);
 
-       void            (*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);
-
        void            (*start_scheduling)(struct cpu_hw_events *cpuc);
 
+       void            (*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);
+
        void            (*stop_scheduling)(struct cpu_hw_events *cpuc);
 
        struct event_constraint *event_constraints;
index d7d30b41f6a39ef155f8709ab0d69ada3d5721c7..ff56fc3f016e187214aee720354ae85d727930ac 100644 (file)
@@ -1934,6 +1934,34 @@ intel_start_scheduling(struct cpu_hw_events *cpuc)
        memcpy(xl->init_state, xl->state, sizeof(xl->init_state));
 }
 
+static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
+{
+       struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
+       struct event_constraint *c = cpuc->event_constraint[idx];
+       struct intel_excl_states *xl;
+       int tid = cpuc->excl_thread_id;
+
+       if (cpuc->is_fake || !is_ht_workaround_enabled())
+               return;
+
+       if (WARN_ON_ONCE(!excl_cntrs))
+               return;
+
+       if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
+               return;
+
+       xl = &excl_cntrs->states[tid];
+
+       lockdep_assert_held(&excl_cntrs->lock);
+
+       if (cntr >= 0) {
+               if (c->flags & PERF_X86_EVENT_EXCL)
+                       xl->init_state[cntr] = INTEL_EXCL_EXCLUSIVE;
+               else
+                       xl->init_state[cntr] = INTEL_EXCL_SHARED;
+       }
+}
+
 static void
 intel_stop_scheduling(struct cpu_hw_events *cpuc)
 {
@@ -2184,34 +2212,6 @@ static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
                intel_put_excl_constraints(cpuc, event);
 }
 
-static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
-{
-       struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
-       struct event_constraint *c = cpuc->event_constraint[idx];
-       struct intel_excl_states *xl;
-       int tid = cpuc->excl_thread_id;
-
-       if (cpuc->is_fake || !is_ht_workaround_enabled())
-               return;
-
-       if (WARN_ON_ONCE(!excl_cntrs))
-               return;
-
-       if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
-               return;
-
-       xl = &excl_cntrs->states[tid];
-
-       lockdep_assert_held(&excl_cntrs->lock);
-
-       if (cntr >= 0) {
-               if (c->flags & PERF_X86_EVENT_EXCL)
-                       xl->init_state[cntr] = INTEL_EXCL_EXCLUSIVE;
-               else
-                       xl->init_state[cntr] = INTEL_EXCL_SHARED;
-       }
-}
-
 static void intel_pebs_aliases_core2(struct perf_event *event)
 {
        if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
@@ -2920,8 +2920,8 @@ static __init void intel_ht_bug(void)
 {
        x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED;
 
-       x86_pmu.commit_scheduling = intel_commit_scheduling;
        x86_pmu.start_scheduling = intel_start_scheduling;
+       x86_pmu.commit_scheduling = intel_commit_scheduling;
        x86_pmu.stop_scheduling = intel_stop_scheduling;
 }
 
@@ -3377,8 +3377,8 @@ static __init int fixup_ht_bug(void)
 
        x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);
 
-       x86_pmu.commit_scheduling = NULL;
        x86_pmu.start_scheduling = NULL;
+       x86_pmu.commit_scheduling = NULL;
        x86_pmu.stop_scheduling = NULL;
 
        watchdog_nmi_enable_all();