#define TEAR_REG(hwc) ((hwc)->last_tag)
#define SAMPL_RATE(hwc) ((hwc)->event_base)
+/* Perf hardware reserve and release functions */
+int perf_reserve_sampling(void);
+void perf_release_sampling(void);
+
#endif /* CONFIG_64BIT */
#endif /* _ASM_S390_PERF_EVENT_H */
#define PMC_INIT 0
#define PMC_RELEASE 1
+#define PMC_FAILURE 2 /* ORed into *flags if setup_pmc_cpu() fails */
static void setup_pmc_cpu(void *flags)
{
int err;
struct cpu_hw_sf *cpusf = &__get_cpu_var(cpu_hw_sf);
- /* XXX Improve error handling and pass a flag in the *flags
- * variable to indicate failures. Alternatively, ignore
- * (print) errors here and let the PMU functions fail if
- * the per-cpu PMU_F_RESERVED flag is not set.
- */
err = 0;
switch (*((int *) flags)) {
case PMC_INIT:
"setup_pmc_cpu: released: cpuhw=%p\n", cpusf);
break;
}
+ if (err)
+ *((int *) flags) |= PMC_FAILURE;
}
static void release_pmc_hardware(void)
irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
on_each_cpu(setup_pmc_cpu, &flags, 1);
+ perf_release_sampling();
}
static int reserve_pmc_hardware(void)
{
int flags = PMC_INIT;
+ int err;
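+
+ /* The sampling facility is shared with oprofile; claim it first */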
+ err = perf_reserve_sampling();
+ if (err)
+ return err;
on_each_cpu(setup_pmc_cpu, &flags, 1);
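+ /* setup_pmc_cpu() ORs PMC_FAILURE into flags on any per-CPU error */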
+ if (flags & PMC_FAILURE) {
+ release_pmc_hardware();
+ return -ENODEV;
+ }
irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
return 0;
return sprintf(page, "event=0x%04llx,name=%s\n",
pmu_attr->id, attr->attr.name);
}
+
+/* Reserve/release functions for sharing perf hardware */
+static DEFINE_SPINLOCK(perf_hw_owner_lock);
+static void *perf_sampling_owner; /* current owner of the sampling facility */
+
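+/* Claim the sampling facility for the caller, or fail with -EBUSY */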
+int perf_reserve_sampling(void)
+{
+ int err;
+
+ err = 0;
+ spin_lock(&perf_hw_owner_lock);
+ if (perf_sampling_owner) {
+ pr_warn("The sampling facility is already reserved by %p\n",
+ perf_sampling_owner);
+ err = -EBUSY;
+ } else
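+ /* record the caller so a later conflict can name the owner */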
+ perf_sampling_owner = __builtin_return_address(0);
+ spin_unlock(&perf_hw_owner_lock);
+ return err;
+}
+EXPORT_SYMBOL(perf_reserve_sampling);
+
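+/* Return the sampling facility; warn if it was not reserved at all */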
+void perf_release_sampling(void)
+{
+ spin_lock(&perf_hw_owner_lock);
+ WARN_ON(!perf_sampling_owner);
+ perf_sampling_owner = NULL;
+ spin_unlock(&perf_hw_owner_lock);
+}
+EXPORT_SYMBOL(perf_release_sampling);
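+
+/*
+ * Typical use by a sampling consumer (a minimal sketch; the actual
+ * setup and teardown calls depend on the caller):
+ *
+ *	err = perf_reserve_sampling();
+ *	if (err)
+ *		return err;
+ *	... set up and start the sampling facility ...
+ *	perf_release_sampling();
+ */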
static unsigned char hws_flush_all;
static unsigned int hws_oom;
+static unsigned int hws_alert; /* CPUMF measurement alerts accepted */
static struct workqueue_struct *hws_wq;
static unsigned int hws_state;
if (!(param32 & CPU_MF_INT_SF_MASK))
return;
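+
+ /* drop alerts that arrive while the sampler is not active */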
+ if (!hws_alert)
+ return;
+
inc_irq_stat(IRQEXT_CMS);
atomic_xchg(&cb->ext_params, atomic_read(&cb->ext_params) | param32);
goto deallocate_exit;
irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
+ hws_alert = 0;
deallocate_sdbt();
hws_state = HWS_DEALLOCATED;
if (hws_state == HWS_STOPPED) {
irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
+ hws_alert = 0;
deallocate_sdbt();
}
if (hws_wq) {
hws_oom = 1;
hws_flush_all = 0;
/* now let them in, 1407 CPUMF external interrupts */
+ hws_alert = 1;
irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
return 0;
*/
#include <linux/oprofile.h>
+#include <linux/perf_event.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/fs.h>
MODULE_PARM_DESC(cpu_type, "Force legacy basic mode sampling"
"(report cpu_type \"timer\"");
+static int __oprofile_hwsampler_start(void)
+{
+ int retval;
+
+ retval = hwsampler_allocate(oprofile_sdbt_blocks, oprofile_sdb_blocks);
+ if (retval)
+ return retval;
+
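+ /* start sampling; undo the buffer allocation on failure */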
+ retval = hwsampler_start_all(oprofile_hw_interval);
+ if (retval)
+ hwsampler_deallocate();
+
+ return retval;
+}
+
static int oprofile_hwsampler_start(void)
{
int retval;
if (!hwsampler_running)
return timer_ops.start();
- retval = hwsampler_allocate(oprofile_sdbt_blocks, oprofile_sdb_blocks);
+ retval = perf_reserve_sampling();
if (retval)
return retval;
- retval = hwsampler_start_all(oprofile_hw_interval);
+ retval = __oprofile_hwsampler_start();
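+ /* give the facility back if the sampler could not be started */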
if (retval)
- hwsampler_deallocate();
+ perf_release_sampling();
return retval;
}
hwsampler_stop_all();
hwsampler_deallocate();
+ perf_release_sampling();
return;
}