oprofile/x86: warn user if a counter is already active
author          Robert Richter <robert.richter@amd.com>
                Tue, 23 Feb 2010 17:14:58 +0000 (18:14 +0100)
committer       Robert Richter <robert.richter@amd.com>
                Fri, 26 Feb 2010 14:14:03 +0000 (15:14 +0100)
Generate a warning if a performance counter is already active when
oprofile sets it up: the counter's control MSR is read and its enable
bit checked. A counter whose control MSR could not be reserved at all
is reported too, on cpu #0 only, since the setup code runs on every
cpu.

Implemented for the AMD and P6 models. P4 is not supported.
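
For illustration, the resulting kernel log messages look like this
(counter and cpu numbers are examples):

    oprofile: counter #1 on cpu #2 may already be used
    oprofile: counter #1 is already reserved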

Cc: Naga Chumbalkar <nagananda.chumbalkar@hp.com>
Cc: Shashi Belur <shashi-kiran.belur@hp.com>
Cc: Tony Jones <tonyj@suse.de>
Signed-off-by: Robert Richter <robert.richter@amd.com>
arch/x86/oprofile/op_model_amd.c
arch/x86/oprofile/op_model_ppro.c
arch/x86/oprofile/op_x86_model.h

diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index a9d194734a8e5b2e9fe3c2350d138f84f708f7d4..ef9d735dea35baf3bb4572ed1a26d94e19f52329 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -194,9 +194,18 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
 
        /* clear all counters */
        for (i = 0; i < NUM_CONTROLS; ++i) {
-               if (unlikely(!msrs->controls[i].addr))
+               if (unlikely(!msrs->controls[i].addr)) {
+                       if (counter_config[i].enabled && !smp_processor_id())
+                               /*
+                                * counter is reserved, this is on all
+                                * cpus, so report only for cpu #0
+                                */
+                               op_x86_warn_reserved(i);
                        continue;
+               }
                rdmsrl(msrs->controls[i].addr, val);
+               if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
+                       op_x86_warn_in_use(i);
                val &= model->reserved;
                wrmsrl(msrs->controls[i].addr, val);
        }
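
For context: ARCH_PERFMON_EVENTSEL0_ENABLE is the enable (EN) bit,
bit 22, of a counter's event-select MSR on both Intel
(IA32_PERFEVTSELx) and AMD (PerfEvtSelx), so the added check is a
single bit test. A minimal sketch, with the constant written out:

    /* sketch only: EN is bit 22 of the event-select MSR */
    #define EVENTSEL_ENABLE (1ULL << 22) /* == ARCH_PERFMON_EVENTSEL0_ENABLE */

    static inline int eventsel_enabled(u64 val)
    {
            return (val & EVENTSEL_ENABLE) != 0;
    }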
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 8eb05878554cf382a0cf482947a8ebf7a93fdeeb..c344525ebb55facfb96c5a01f184535306f103da 100644
--- a/arch/x86/oprofile/op_model_ppro.c
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -82,9 +82,18 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
 
        /* clear all counters */
        for (i = 0; i < num_counters; ++i) {
-               if (unlikely(!msrs->controls[i].addr))
+               if (unlikely(!msrs->controls[i].addr)) {
+                       if (counter_config[i].enabled && !smp_processor_id())
+                               /*
+                                * counter is reserved, this is on all
+                                * cpus, so report only for cpu #0
+                                */
+                               op_x86_warn_reserved(i);
                        continue;
+               }
                rdmsrl(msrs->controls[i].addr, val);
+               if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
+                       op_x86_warn_in_use(i);
                val &= model->reserved;
                wrmsrl(msrs->controls[i].addr, val);
        }
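
The !smp_processor_id() gate in both hunks exists because the setup
path runs on every cpu, so an unconditional warning for a reserved
counter would be printed once per cpu. A sketch of that calling
pattern, assuming the usual oprofile NMI driver flow in nmi_int.c:

    /* sketch: the per-cpu setup (which invokes the model's
     * setup_ctrs hook) is run on each online cpu; only cpu #0
     * reports reserved counters, avoiding duplicate messages */
    on_each_cpu(nmi_cpu_setup, NULL, 1);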
diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
index 7b8e75d1608125aa81e7069d781d08bf15052e40..59fa2bdb0da3216bb029748cc32d6961b72c3d25 100644
--- a/arch/x86/oprofile/op_x86_model.h
+++ b/arch/x86/oprofile/op_x86_model.h
@@ -57,6 +57,17 @@ struct op_x86_model_spec {
 
 struct op_counter_config;
 
+static inline void op_x86_warn_in_use(int counter)
+{
+       pr_warning("oprofile: counter #%d on cpu #%d may already be used\n",
+                  counter, smp_processor_id());
+}
+
+static inline void op_x86_warn_reserved(int counter)
+{
+       pr_warning("oprofile: counter #%d is already reserved\n", counter);
+}
+
 extern u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
                           struct op_counter_config *counter_config);
 extern int op_x86_phys_to_virt(int phys);