perf, x86: Remove superfluous arguments to x86_perf_event_set_period()
author	Peter Zijlstra <a.p.zijlstra@chello.nl>
Tue, 2 Mar 2010 19:16:01 +0000 (20:16 +0100)
committer	Ingo Molnar <mingo@elte.hu>
Wed, 10 Mar 2010 12:22:27 +0000 (13:22 +0100)
The second and third arguments to x86_perf_event_set_period() are
superfluous since they are simple expressions of the first argument.
Hence remove them.
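
(As a rough sketch, not part of the patch itself: both removed arguments
can be recovered from the remaining one, since the hw_perf_event is
embedded in the perf_event and the counter index lives inside it.)

	struct hw_perf_event *hwc = &event->hw;	/* was the 2nd argument */
	int idx = hwc->idx;			/* was the 3rd argument */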

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus@samba.org
Cc: eranian@google.com
Cc: robert.richter@amd.com
Cc: fweisbec@gmail.com
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
LKML-Reference: <20100304140100.006500906@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_intel.c

index 585d5608ae6bea1aa1631f6c90a2c74170d59e66..fcf1788f9626ac3b0ad43ac034966c5300cf94fb 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -170,8 +170,7 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
        .enabled = 1,
 };
 
-static int x86_perf_event_set_period(struct perf_event *event,
-                            struct hw_perf_event *hwc, int idx);
+static int x86_perf_event_set_period(struct perf_event *event);
 
 /*
  * Generalized hw caching related hw_event table, filled
@@ -835,7 +834,7 @@ void hw_perf_enable(void)
 
                        if (hwc->idx == -1) {
                                x86_assign_hw_event(event, cpuc, i);
-                               x86_perf_event_set_period(event, hwc, hwc->idx);
+                               x86_perf_event_set_period(event);
                        }
                        /*
                         * need to mark as active because x86_pmu_disable()
@@ -876,12 +875,12 @@ static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
  * To be called with the event disabled in hw:
  */
 static int
-x86_perf_event_set_period(struct perf_event *event,
-                            struct hw_perf_event *hwc, int idx)
+x86_perf_event_set_period(struct perf_event *event)
 {
+       struct hw_perf_event *hwc = &event->hw;
        s64 left = atomic64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
-       int err, ret = 0;
+       int err, ret = 0, idx = hwc->idx;
 
        if (idx == X86_PMC_IDX_FIXED_BTS)
                return 0;
@@ -979,7 +978,7 @@ static int x86_pmu_start(struct perf_event *event)
        if (hwc->idx == -1)
                return -EAGAIN;
 
-       x86_perf_event_set_period(event, hwc, hwc->idx);
+       x86_perf_event_set_period(event);
        x86_pmu.enable(hwc, hwc->idx);
 
        return 0;
@@ -1123,7 +1122,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
                handled         = 1;
                data.period     = event->hw.last_period;
 
-               if (!x86_perf_event_set_period(event, hwc, idx))
+               if (!x86_perf_event_set_period(event))
                        continue;
 
                if (perf_event_overflow(event, 1, &data, regs))
index c582449163fa312950934f301f0a725a98e123fe..6dbdf91ab342118bb16434acd63dfd424578b802 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -699,7 +699,7 @@ static int intel_pmu_save_and_restart(struct perf_event *event)
        int ret;
 
        x86_perf_event_update(event, hwc, idx);
-       ret = x86_perf_event_set_period(event, hwc, idx);
+       ret = x86_perf_event_set_period(event);
 
        return ret;
 }