perf counters: consolidate hw_perf save/restore APIs
author	Ingo Molnar <mingo@elte.hu>
Thu, 11 Dec 2008 12:45:51 +0000 (13:45 +0100)
committer	Ingo Molnar <mingo@elte.hu>
Thu, 11 Dec 2008 14:45:53 +0000 (15:45 +0100)
Impact: cleanup

Rename them to better match the usual IRQ disable/enable APIs:

 hw_perf_disable_all()  => hw_perf_save_disable()
 hw_perf_restore_ctrl() => hw_perf_restore()
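
The new names line up with the local_irq_save()/local_irq_restore()
convention. As a minimal sketch of the intended calling pattern (a
hypothetical caller, mirroring the call sites updated below):

	u64 perf_flags;

	/* Hypothetical caller sketch, not part of this patch. */
	/* Globally disable hardware counters, saving their previous state: */
	perf_flags = hw_perf_save_disable();

	/* ... NMI-safe critical section, e.g. counter list manipulation ... */

	/* Restore counters to the saved state: */
	hw_perf_restore(perf_flags);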

Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/kernel/cpu/perf_counter.c
drivers/acpi/processor_idle.c
include/linux/perf_counter.h
kernel/perf_counter.c

index 43c8e9a38b4e2d702b81a5797a3a2396cce3d406..3e1dbebe22b9cfe545423c27d1d0e1aa3d0cd0b0 100644 (file)
@@ -118,13 +118,13 @@ void hw_perf_enable_all(void)
        wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask, 0);
 }
 
-void hw_perf_restore_ctrl(u64 ctrl)
+void hw_perf_restore(u64 ctrl)
 {
        wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, ctrl, 0);
 }
-EXPORT_SYMBOL_GPL(hw_perf_restore_ctrl);
+EXPORT_SYMBOL_GPL(hw_perf_restore);
 
-u64 hw_perf_disable_all(void)
+u64 hw_perf_save_disable(void)
 {
        u64 ctrl;
 
@@ -132,7 +132,7 @@ u64 hw_perf_disable_all(void)
        wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
        return ctrl;
 }
-EXPORT_SYMBOL_GPL(hw_perf_disable_all);
+EXPORT_SYMBOL_GPL(hw_perf_save_disable);
 
 static inline void
 __x86_perf_counter_disable(struct hw_perf_counter *hwc, unsigned int idx)
index cca804e6f1dd451bacea8437baff4ee3283bea5a..a3e66a33b7a2ecf4cb87c47fc74078f4236eac14 100644 (file)
@@ -270,11 +270,11 @@ static atomic_t c3_cpu_count;
 /* Common C-state entry for C2, C3, .. */
 static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
 {
-       u64 pctrl;
+       u64 perf_flags;
 
        /* Don't trace irqs off for idle */
        stop_critical_timings();
-       pctrl = hw_perf_disable_all();
+       perf_flags = hw_perf_save_disable();
        if (cstate->entry_method == ACPI_CSTATE_FFH) {
                /* Call into architectural FFH based C-state */
                acpi_processor_ffh_cstate_enter(cstate);
@@ -287,7 +287,7 @@ static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
                   gets asserted in time to freeze execution properly. */
                unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
        }
-       hw_perf_restore_ctrl(pctrl);
+       hw_perf_restore(perf_flags);
        start_critical_timings();
 }
 #endif /* !CONFIG_CPU_IDLE */
@@ -1433,7 +1433,7 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 
        /* Don't trace irqs off for idle */
        stop_critical_timings();
-       pctrl = hw_perf_disable_all();
+       pctrl = hw_perf_save_disable();
        if (cx->entry_method == ACPI_CSTATE_FFH) {
                /* Call into architectural FFH based C-state */
                acpi_processor_ffh_cstate_enter(cx);
@@ -1448,7 +1448,7 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
                   gets asserted in time to freeze execution properly. */
                unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
        }
-       hw_perf_restore_ctrl(pctrl);
+       hw_perf_restore(pctrl);
        start_critical_timings();
 }
 
index 9a1713a1be277a17661cf242033527a31c4ec7f7..68f6e3ad531fb55a488106b645a6f364475399c0 100644 (file)
@@ -67,7 +67,7 @@ enum perf_counter_record_type {
  * Hardware event to monitor via a performance monitoring counter:
  */
 struct perf_counter_hw_event {
-       u64                     type;
+       s64                     type;
 
        u64                     irq_period;
        u32                     record_type;
@@ -206,8 +206,8 @@ extern void perf_counter_task_tick(struct task_struct *task, int cpu);
 extern void perf_counter_init_task(struct task_struct *task);
 extern void perf_counter_notify(struct pt_regs *regs);
 extern void perf_counter_print_debug(void);
-extern void hw_perf_restore_ctrl(u64 ctrl);
-extern u64 hw_perf_disable_all(void);
+extern u64 hw_perf_save_disable(void);
+extern void hw_perf_restore(u64 ctrl);
 extern void atomic64_counter_set(struct perf_counter *counter, u64 val64);
 extern u64 atomic64_counter_read(struct perf_counter *counter);
 
@@ -221,8 +221,8 @@ perf_counter_task_tick(struct task_struct *task, int cpu)           { }
 static inline void perf_counter_init_task(struct task_struct *task)    { }
 static inline void perf_counter_notify(struct pt_regs *regs)           { }
 static inline void perf_counter_print_debug(void)                      { }
-static inline void hw_perf_restore_ctrl(u64 ctrl)                      { }
-static inline u64 hw_perf_disable_all(void)                  { return 0; }
+static inline void hw_perf_restore(u64 ctrl)                   { }
+static inline u64 hw_perf_save_disable(void)                 { return 0; }
 #endif
 
 #endif /* _LINUX_PERF_COUNTER_H */
index 506286e5ba634f75a734e4cd1abc474eb1eaf3a4..0e93fea17120c0a2b98da8dca8be4eba9c9ee7dc 100644 (file)
@@ -43,8 +43,8 @@ hw_perf_counter_init(struct perf_counter *counter)
        return ERR_PTR(-EINVAL);
 }
 
-u64 __weak hw_perf_disable_all(void)           { return 0; }
-void __weak hw_perf_restore_ctrl(u64 ctrl)     { }
+u64 __weak hw_perf_save_disable(void)          { return 0; }
+void __weak hw_perf_restore(u64 ctrl)  { }
 void __weak hw_perf_counter_setup(void)                { }
 
 #if BITS_PER_LONG == 64
@@ -180,9 +180,9 @@ static void __perf_counter_remove_from_context(void *info)
         * Protect the list operation against NMI by disabling the
         * counters on a global level. NOP for non NMI based counters.
         */
-       perf_flags = hw_perf_disable_all();
+       perf_flags = hw_perf_save_disable();
        list_del_counter(counter, ctx);
-       hw_perf_restore_ctrl(perf_flags);
+       hw_perf_restore(perf_flags);
 
        if (!ctx->task) {
                /*
@@ -273,9 +273,9 @@ static void __perf_install_in_context(void *info)
         * Protect the list operation against NMI by disabling the
         * counters on a global level. NOP for non NMI based counters.
         */
-       perf_flags = hw_perf_disable_all();
+       perf_flags = hw_perf_save_disable();
        list_add_counter(counter, ctx);
-       hw_perf_restore_ctrl(perf_flags);
+       hw_perf_restore(perf_flags);
 
        ctx->nr_counters++;
 
@@ -495,13 +495,13 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu)
        /*
         * Rotate the first entry last (works just fine for group counters too):
         */
-       perf_flags = hw_perf_disable_all();
+       perf_flags = hw_perf_save_disable();
        list_for_each_entry(counter, &ctx->counter_list, list_entry) {
                list_del(&counter->list_entry);
                list_add_tail(&counter->list_entry, &ctx->counter_list);
                break;
        }
-       hw_perf_restore_ctrl(perf_flags);
+       hw_perf_restore(perf_flags);
 
        spin_unlock(&ctx->lock);