*
* Copyright (C) 2008-2009 Intel Corporation.
* Markus Metzger <markus.t.metzger@gmail.com>, 2008-2009
- *
*/
-
-#include <linux/module.h>
-#include <linux/fs.h>
+#include <linux/spinlock.h>
+#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
-#include <linux/kallsyms.h>
-#include <linux/mutex.h>
+#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/smp.h>
+#include <linux/fs.h>
#include <asm/ds.h>
#define SIZEOF_BTS (1 << 13)
-/* The tracer mutex protects the below per-cpu tracer array.
- It needs to be held to:
- - start tracing on all cpus
- - stop tracing on all cpus
- - start tracing on a single hotplug cpu
- - stop tracing on a single hotplug cpu
- - read the trace from all cpus
- - read the trace from a single cpu
-*/
-static DEFINE_MUTEX(bts_tracer_mutex);
+/*
+ * The tracer lock protects the below per-cpu tracer array.
+ * It needs to be held to:
+ * - start tracing on all cpus
+ * - stop tracing on all cpus
+ * - start tracing on a single hotplug cpu
+ * - stop tracing on a single hotplug cpu
+ * - read the trace from all cpus
+ * - read the trace from a single cpu
+ */
+static DEFINE_SPINLOCK(bts_tracer_lock);
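+/*
+ * Note: a raw spinlock is used rather than a mutex, which allows the lock
+ * to be taken in trace_hw_branch_oops() as well, where sleeping may not
+ * be permitted.
+ */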
static DEFINE_PER_CPU(struct bts_tracer *, tracer);
static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer);
* Start tracing on the current cpu.
* The argument is ignored.
*
- * pre: bts_tracer_mutex must be locked.
+ * pre: bts_tracer_lock must be locked.
*/
static void bts_trace_start_cpu(void *arg)
{
static void bts_trace_start(struct trace_array *tr)
{
- mutex_lock(&bts_tracer_mutex);
+ spin_lock(&bts_tracer_lock);
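+	/* Start tracing on every online cpu, then set the global enable flag. */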
on_each_cpu(bts_trace_start_cpu, NULL, 1);
trace_hw_branches_enabled = 1;
- mutex_unlock(&bts_tracer_mutex);
+ spin_unlock(&bts_tracer_lock);
}
/*
* Stop tracing on the current cpu.
* The argument is ignored.
*
- * pre: bts_tracer_mutex must be locked.
+ * pre: bts_tracer_lock must be locked.
*/
static void bts_trace_stop_cpu(void *arg)
{
static void bts_trace_stop(struct trace_array *tr)
{
- mutex_lock(&bts_tracer_mutex);
+ spin_lock(&bts_tracer_lock);
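+	/* Clear the global enable flag before stopping tracing on each cpu. */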
trace_hw_branches_enabled = 0;
on_each_cpu(bts_trace_stop_cpu, NULL, 1);
- mutex_unlock(&bts_tracer_mutex);
+ spin_unlock(&bts_tracer_lock);
}
static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
- mutex_lock(&bts_tracer_mutex);
+ spin_lock(&bts_tracer_lock);
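+	/* Nothing to do unless tracing is currently enabled. */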
if (!trace_hw_branches_enabled)
goto out;
}
out:
- mutex_unlock(&bts_tracer_mutex);
+ spin_unlock(&bts_tracer_lock);
return NOTIFY_DONE;
}
/*
* Collect the trace on the current cpu and write it into the ftrace buffer.
*
- * pre: bts_tracer_mutex must be locked
+ * pre: bts_tracer_lock must be locked
*/
static void trace_bts_cpu(void *arg)
{
static void trace_bts_prepare(struct trace_iterator *iter)
{
- mutex_lock(&bts_tracer_mutex);
+ spin_lock(&bts_tracer_lock);
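+	/* Collect the trace from every cpu into the iterator's trace array. */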
on_each_cpu(trace_bts_cpu, iter->tr, 1);
- mutex_unlock(&bts_tracer_mutex);
+ spin_unlock(&bts_tracer_lock);
}
static void trace_bts_close(struct trace_iterator *iter)
void trace_hw_branch_oops(void)
{
- mutex_lock(&bts_tracer_mutex);
+ spin_lock(&bts_tracer_lock);
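+	/* Dump the trace for the current cpu only. */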
trace_bts_cpu(hw_branch_trace);
- mutex_unlock(&bts_tracer_mutex);
+ spin_unlock(&bts_tracer_lock);
}
struct tracer bts_tracer __read_mostly =