perf, x86: Use unlocked bitops
authorPeter Zijlstra <a.p.zijlstra@chello.nl>
Tue, 2 Mar 2010 20:16:55 +0000 (21:16 +0100)
committerIngo Molnar <mingo@elte.hu>
Wed, 10 Mar 2010 12:22:29 +0000 (13:22 +0100)
There is no concurrency on these variables, so don't use LOCK'ed ops.

As to the intel_pmu_handle_irq() status bit clearing, nobody uses that so
remove it altogether.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus@samba.org
Cc: eranian@google.com
Cc: robert.richter@amd.com
Cc: fweisbec@gmail.com
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
LKML-Reference: <20100304140100.240023029@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_amd.c
arch/x86/kernel/cpu/perf_event_intel.c

index 2dd704fa129901bdf1a562a800a967958387f5de..01b166737424b8f87bc0ea7f28408000388f5b35 100644 (file)
@@ -643,7 +643,7 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
                if (test_bit(hwc->idx, used_mask))
                        break;
 
-               set_bit(hwc->idx, used_mask);
+               __set_bit(hwc->idx, used_mask);
                if (assign)
                        assign[i] = hwc->idx;
        }
@@ -692,7 +692,7 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
                        if (j == X86_PMC_IDX_MAX)
                                break;
 
-                       set_bit(j, used_mask);
+                       __set_bit(j, used_mask);
 
                        if (assign)
                                assign[i] = j;
@@ -842,7 +842,7 @@ void hw_perf_enable(void)
                         * clear active_mask and events[] yet it preserves
                         * idx
                         */
-                       set_bit(hwc->idx, cpuc->active_mask);
+                       __set_bit(hwc->idx, cpuc->active_mask);
                        cpuc->events[hwc->idx] = event;
 
                        x86_pmu.enable(event);
@@ -1057,7 +1057,7 @@ static void x86_pmu_stop(struct perf_event *event)
         * Must be done before we disable, otherwise the nmi handler
         * could reenable again:
         */
-       clear_bit(idx, cpuc->active_mask);
+       __clear_bit(idx, cpuc->active_mask);
        x86_pmu.disable(event);
 
        /*
index 014528ba7d57d77ad3ada0cba3eeb67b5ab7488d..573458f1caf23c91ab39930fc0613cf78d27d55f 100644 (file)
@@ -287,7 +287,7 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
         * initialize all possible NB constraints
         */
        for (i = 0; i < x86_pmu.num_events; i++) {
-               set_bit(i, nb->event_constraints[i].idxmsk);
+               __set_bit(i, nb->event_constraints[i].idxmsk);
                nb->event_constraints[i].weight = 1;
        }
        return nb;
index a84094897799f0b078a6366606ed43e61d7148e5..d87421c3f55b09a877c7c63c1c4e3da7d0022ed0 100644 (file)
@@ -765,7 +765,6 @@ again:
        for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
                struct perf_event *event = cpuc->events[bit];
 
-               clear_bit(bit, (unsigned long *) &status);
                if (!test_bit(bit, cpuc->active_mask))
                        continue;