perf, x86: Store perfctr msr addresses in config_base/event_base
Author:     Robert Richter <robert.richter@amd.com>
AuthorDate: Wed, 2 Feb 2011 16:40:59 +0000 (17:40 +0100)
Commit:     Ingo Molnar <mingo@elte.hu>
CommitDate: Wed, 16 Feb 2011 12:30:52 +0000 (13:30 +0100)
Instead of storing the base addresses, we can store the counters' MSR
addresses directly in config_base/event_base of struct hw_perf_event.
This avoids recalculating the address on each MSR access; the addresses
are now configured only once, when the event is assigned to a counter.
This change is also needed by a later patch that modifies the address
calculation.
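
For illustration only, a minimal userspace sketch of the two addressing
schemes (hypothetical names and simplified types, not the kernel API):
previously the counter's MSR address was derived as base + idx on every
rdmsr/wrmsr, whereas now the full address is resolved once, when the
event is assigned to a counter:

  #include <stdint.h>

  #define EVENTSEL_BASE 0x186u   /* e.g. IA32_PERFEVTSEL0 */
  #define PERFCTR_BASE  0x0c1u   /* e.g. IA32_PMC0 */

  struct hw_event {
          int      idx;          /* counter index */
          uint32_t config_base;  /* full eventsel MSR address */
          uint32_t event_base;   /* full perfctr MSR address */
  };

  /* Old scheme: recompute base + idx on every MSR access. */
  static uint32_t counter_msr_old(const struct hw_event *e)
  {
          return PERFCTR_BASE + (uint32_t)e->idx;
  }

  /* New scheme: resolve the full addresses once, at assignment time. */
  static void assign_event(struct hw_event *e, int idx)
  {
          e->idx         = idx;
          e->config_base = EVENTSEL_BASE + (uint32_t)idx;
          e->event_base  = PERFCTR_BASE  + (uint32_t)idx;
  }

With the full address stored in the event, a later patch can change the
idx -> address mapping in one place (for example for counter MSRs that
are not laid out contiguously) without touching every rdmsr/wrmsr call
site.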

Signed-off-by: Robert Richter <robert.richter@amd.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1296664860-10886-5-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_p4.c
arch/x86/kernel/cpu/perf_event_p6.c

diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index ee40c1ad0ebcd0cc37388fc4724b01e075f16f65..316194330da066e5be27c30e6ae7162898a97c81 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -298,7 +298,7 @@ x86_perf_event_update(struct perf_event *event)
         */
 again:
        prev_raw_count = local64_read(&hwc->prev_count);
-       rdmsrl(hwc->event_base + idx, new_raw_count);
+       rdmsrl(hwc->event_base, new_raw_count);
 
        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                                        new_raw_count) != prev_raw_count)
@@ -655,7 +655,7 @@ static void x86_pmu_disable(struct pmu *pmu)
 static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
                                          u64 enable_mask)
 {
-       wrmsrl(hwc->config_base + hwc->idx, hwc->config | enable_mask);
+       wrmsrl(hwc->config_base, hwc->config | enable_mask);
 }
 
 static void x86_pmu_enable_all(int added)
@@ -834,15 +834,10 @@ static inline void x86_assign_hw_event(struct perf_event *event,
                hwc->event_base = 0;
        } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
                hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
-               /*
-                * We set it so that event_base + idx in wrmsr/rdmsr maps to
-                * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
-                */
-               hwc->event_base =
-                       MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
+               hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0;
        } else {
-               hwc->config_base = x86_pmu.eventsel;
-               hwc->event_base  = x86_pmu.perfctr;
+               hwc->config_base = x86_pmu_config_addr(hwc->idx);
+               hwc->event_base  = x86_pmu_event_addr(hwc->idx);
        }
 }
 
@@ -932,7 +927,7 @@ static inline void x86_pmu_disable_event(struct perf_event *event)
 {
        struct hw_perf_event *hwc = &event->hw;
 
-       wrmsrl(hwc->config_base + hwc->idx, hwc->config);
+       wrmsrl(hwc->config_base, hwc->config);
 }
 
 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
@@ -985,7 +980,7 @@ x86_perf_event_set_period(struct perf_event *event)
         */
        local64_set(&hwc->prev_count, (u64)-left);
 
-       wrmsrl(hwc->event_base + idx, (u64)(-left) & x86_pmu.cntval_mask);
+       wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
 
        /*
         * Due to an erratum on certain cpus we need
@@ -993,7 +988,7 @@ x86_perf_event_set_period(struct perf_event *event)
         * is updated properly
         */
        if (x86_pmu.perfctr_second_write) {
-               wrmsrl(hwc->event_base + idx,
+               wrmsrl(hwc->event_base,
                        (u64)(-left) & x86_pmu.cntval_mask);
        }
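
Note: the x86_pmu_config_addr()/x86_pmu_event_addr() helpers used in the
x86_assign_hw_event() hunk above were introduced earlier in this series;
at this point they are, roughly, thin wrappers around the old
base + index computation (a sketch, not necessarily the exact kernel
code):

  static inline unsigned int x86_pmu_config_addr(int index)
  {
          return x86_pmu.eventsel + index;  /* eventsel MSR of counter */
  }

  static inline unsigned int x86_pmu_event_addr(int index)
  {
          return x86_pmu.perfctr + index;   /* perfctr MSR of counter */
  }

The removed comment in that hunk documented the old fixed-counter
arithmetic: with MSR_ARCH_PERFMON_FIXED_CTR0 = 0x309 and
X86_PMC_IDX_FIXED = 32, storing event_base = 0x309 - 32 made
event_base + idx map idx 32..34 onto MSR_ARCH_PERFMON_FIXED_CTR0..CTR2
(0x309..0x30b).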
 
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index ff751a9f182b15bcb5ff94a00079c26a3d80229d..3769ac822f96b09a64548f0a43fa6a6cf7dac82a 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -764,9 +764,9 @@ static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
        u64 v;
 
        /* an official way for overflow indication */
-       rdmsrl(hwc->config_base + hwc->idx, v);
+       rdmsrl(hwc->config_base, v);
        if (v & P4_CCCR_OVF) {
-               wrmsrl(hwc->config_base + hwc->idx, v & ~P4_CCCR_OVF);
+               wrmsrl(hwc->config_base, v & ~P4_CCCR_OVF);
                return 1;
        }
 
@@ -815,7 +815,7 @@ static inline void p4_pmu_disable_event(struct perf_event *event)
         * state we need to clear P4_CCCR_OVF, otherwise the interrupt gets
         * asserted again and again
         */
-       (void)checking_wrmsrl(hwc->config_base + hwc->idx,
+       (void)checking_wrmsrl(hwc->config_base,
                (u64)(p4_config_unpack_cccr(hwc->config)) &
                        ~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
 }
@@ -885,7 +885,7 @@ static void p4_pmu_enable_event(struct perf_event *event)
        p4_pmu_enable_pebs(hwc->config);
 
        (void)checking_wrmsrl(escr_addr, escr_conf);
-       (void)checking_wrmsrl(hwc->config_base + hwc->idx,
+       (void)checking_wrmsrl(hwc->config_base,
                                (cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE);
 }
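
Note: on P4, x86_pmu.eventsel points at the CCCR MSRs (the ESCRs are
programmed separately via escr_addr), so config_base above holds the
counter's CCCR address, and clearing an overflow becomes a plain
read-modify-write on one stored MSR address. A minimal sketch of that
pattern, with hypothetical rdmsr()/wrmsr() accessors rather than the
kernel API:

  #include <stdint.h>

  extern uint64_t rdmsr(uint32_t msr);              /* assumed accessor */
  extern void     wrmsr(uint32_t msr, uint64_t v);  /* assumed accessor */

  /* Returns 1 if the counter had overflowed (and clears the flag). */
  static int clear_overflow(uint32_t cccr_msr, uint64_t ovf_bit)
  {
          uint64_t v = rdmsr(cccr_msr);

          if (v & ovf_bit) {
                  wrmsr(cccr_msr, v & ~ovf_bit);
                  return 1;
          }
          return 0;
  }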
 
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index 34ba07be2cdab5ffa45b3dfe783fc24a939bceb1..20c097e33860b7d5a599fbf132622b9c383ad8a5 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -68,7 +68,7 @@ p6_pmu_disable_event(struct perf_event *event)
        if (cpuc->enabled)
                val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
-       (void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
+       (void)checking_wrmsrl(hwc->config_base, val);
 }
 
 static void p6_pmu_enable_event(struct perf_event *event)
@@ -81,7 +81,7 @@ static void p6_pmu_enable_event(struct perf_event *event)
        if (cpuc->enabled)
                val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
-       (void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
+       (void)checking_wrmsrl(hwc->config_base, val);
 }
 
 static __initconst const struct x86_pmu p6_pmu = {