Add tracepoints to lockdep's lock_acquire/lock_release (and, with
CONFIG_LOCK_STAT, lock_contended/lock_acquired) hooks, augmenting the
function-graph traces with lock names when lockdep is available:
 1)               |  down_read_trylock() {
 1)               |    _spin_lock_irqsave() {
 1)               |      /* lock_acquire: &sem->wait_lock */
 1)   4.201 us    |    }
 1)               |    _spin_unlock_irqrestore() {
 1)               |      /* lock_release: &sem->wait_lock */
 1)   3.523 us    |    }
 1)               |    /* lock_acquire: try read &mm->mmap_sem */
 1) + 13.386 us   |  }
 1)   1.635 us    |  find_vma();
 1)               |  handle_mm_fault() {
 1)               |    __do_fault() {
 1)               |      filemap_fault() {
 1)               |        find_lock_page() {
 1)               |          find_get_page() {
 1)               |            /* lock_acquire: read rcu_read_lock */
 1)               |            /* lock_release: rcu_read_lock */
 1)   5.697 us    |          }
 1)   8.158 us    |        }
 1) + 11.079 us   |      }
 1)               |      _spin_lock() {
 1)               |        /* lock_acquire: __pte_lockptr(page) */
 1)   3.949 us    |      }
 1)   1.460 us    |      page_add_file_rmap();
 1)               |      _spin_unlock() {
 1)               |        /* lock_release: __pte_lockptr(page) */
 1)   3.115 us    |      }
 1)               |      unlock_page() {
 1)   1.421 us    |        page_waitqueue();
 1)   1.220 us    |        __wake_up_bit();
 1)   6.519 us    |      }
 1) + 34.328 us   |    }
 1) + 37.452 us   |  }
 1)               |  up_read() {
 1)               |    /* lock_release: &mm->mmap_sem */
 1)               |    _spin_lock_irqsave() {
 1)               |      /* lock_acquire: &sem->wait_lock */
 1)   3.865 us    |    }
 1)               |    _spin_unlock_irqrestore() {
 1)               |      /* lock_release: &sem->wait_lock */
 1)   8.562 us    |    }
 1) + 17.370 us   |  }
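
The names in the lock_* annotations come straight from lockdep: every
lockdep-annotated lock embeds a struct lockdep_map whose ->name is set
when the lock is initialized, and the tracepoints below do nothing more
than print it. For reference, a trimmed-down sketch of the structure
(see include/linux/lockdep.h for the real definition):

struct lockdep_map {
	struct lock_class_key	*key;
	struct lock_class	*class_cache;
	const char		*name;	/* what TPFMT("%s", lock->name) prints */
#ifdef CONFIG_LOCK_STAT
	int			cpu;
	unsigned long		ip;
#endif
};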
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Török Edwin <edwintorok@gmail.com>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <1236166375.5330.7209.camel@laptop>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
--- /dev/null
+++ b/include/trace/lockdep.h
+#ifndef _TRACE_LOCKDEP_H
+#define _TRACE_LOCKDEP_H
+
+#include <linux/lockdep.h>
+#include <linux/tracepoint.h>
+
+#include <trace/lockdep_event_types.h>
+
+#endif /* _TRACE_LOCKDEP_H */
--- /dev/null
+++ b/include/trace/lockdep_event_types.h
+
+#ifndef TRACE_EVENT_FORMAT
+# error Do not include this file directly.
+# error Unless you know what you are doing.
+#endif
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM lock
+
+#ifdef CONFIG_LOCKDEP
+
+TRACE_FORMAT(lock_acquire,
+ TPPROTO(struct lockdep_map *lock, unsigned int subclass,
+ int trylock, int read, int check,
+ struct lockdep_map *next_lock, unsigned long ip),
+ TPARGS(lock, subclass, trylock, read, check, next_lock, ip),
+ TPFMT("%s%s%s", trylock ? "try " : "",
+ read ? "read " : "", lock->name)
+ );
+
+TRACE_FORMAT(lock_release,
+ TPPROTO(struct lockdep_map *lock, int nested, unsigned long ip),
+ TPARGS(lock, nested, ip),
+ TPFMT("%s", lock->name)
+ );
+
+#ifdef CONFIG_LOCK_STAT
+
+TRACE_FORMAT(lock_contended,
+ TPPROTO(struct lockdep_map *lock, unsigned long ip),
+ TPARGS(lock, ip),
+ TPFMT("%s", lock->name)
+ );
+
+TRACE_FORMAT(lock_acquired,
+ TPPROTO(struct lockdep_map *lock, unsigned long ip),
+ TPARGS(lock, ip),
+ TPFMT("%s", lock->name)
+ );
+
+#endif /* CONFIG_LOCK_STAT */
+#endif /* CONFIG_LOCKDEP */
+
+#undef TRACE_SYSTEM
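
For the curious: once the event tracer's _TRACE_FORMAT macro (updated
at the end of this patch) has chewed on the lock_acquire entry above,
the result is roughly the function below. This is a hand-expanded
sketch, not literal preprocessor output:

static void ftrace_event_lock_acquire(struct lockdep_map *lock,
				      unsigned int subclass, int trylock,
				      int read, int check,
				      struct lockdep_map *next_lock,
				      unsigned long ip)
{
	event_trace_printk(_RET_IP_, "lock_acquire: %s%s%s",
			   trylock ? "try " : "",
			   read ? "read " : "", lock->name);
}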
#include <trace/sched_event_types.h>
#include <trace/irq_event_types.h>
+#include <trace/lockdep_event_types.h>
#include <trace/sched.h>
#include <trace/irq.h>
+#include <trace/lockdep.h>
#include <linux/hash.h>
#include <linux/ftrace.h>
#include <linux/stringify.h>
+#include <trace/lockdep.h>
#include <asm/sections.h>
}
EXPORT_SYMBOL_GPL(lock_set_class);
+DEFINE_TRACE(lock_acquire);
+
/*
* We are not always called with irqs disabled - do that here,
* and also avoid lockdep recursion:
{
unsigned long flags;
+ trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
+
if (unlikely(current->lockdep_recursion))
return;
}
EXPORT_SYMBOL_GPL(lock_acquire);
+DEFINE_TRACE(lock_release);
+
void lock_release(struct lockdep_map *lock, int nested,
unsigned long ip)
{
unsigned long flags;
+ trace_lock_release(lock, nested, ip);
+
if (unlikely(current->lockdep_recursion))
return;
lock->ip = ip;
}
+DEFINE_TRACE(lock_contended);
+
void lock_contended(struct lockdep_map *lock, unsigned long ip)
{
unsigned long flags;
+ trace_lock_contended(lock, ip);
+
if (unlikely(!lock_stat))
return;
}
EXPORT_SYMBOL_GPL(lock_contended);
+DEFINE_TRACE(lock_acquired);
+
void lock_acquired(struct lockdep_map *lock, unsigned long ip)
{
unsigned long flags;
+ trace_lock_acquired(lock, ip);
+
if (unlikely(!lock_stat))
return;
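
Since the hooks are plain tracepoints, consumers other than ftrace can
attach to them as well. A hypothetical probe module (illustration
only, not part of this patch; names are made up) could do:

#include <linux/module.h>
#include <trace/lockdep.h>

static atomic_t nr_acquired = ATOMIC_INIT(0);

/*
 * Keep the probe body lock-free: taking a lock here would recurse
 * into the very paths being traced.
 */
static void probe_lock_acquired(struct lockdep_map *lock, unsigned long ip)
{
	atomic_inc(&nr_acquired);
}

static int __init lockprobe_init(void)
{
	return register_trace_lock_acquired(probe_lock_acquired);
}

static void __exit lockprobe_exit(void)
{
	unregister_trace_lock_acquired(probe_lock_acquired);
}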
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
-static DEFINE_SPINLOCK(trace_cmdline_lock);
+static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
/* temporary disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;
* nor do we want to disable interrupts,
* so if we miss here, then better luck next time.
*/
- if (!spin_trylock(&trace_cmdline_lock))
+ if (!__raw_spin_trylock(&trace_cmdline_lock))
return;
idx = map_pid_to_cmdline[tsk->pid];
memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
- spin_unlock(&trace_cmdline_lock);
+ __raw_spin_unlock(&trace_cmdline_lock);
}
char *trace_find_cmdline(int pid)
int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
{
- static DEFINE_SPINLOCK(trace_buf_lock);
+ static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
static char trace_buf[TRACE_BUF_SIZE];
struct ring_buffer_event *event;
goto out;
pause_graph_tracing();
- spin_lock_irqsave(&trace_buf_lock, irq_flags);
+ raw_local_irq_save(irq_flags);
+ __raw_spin_lock(&trace_buf_lock);
len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
len = min(len, TRACE_BUF_SIZE-1);
ring_buffer_unlock_commit(tr->buffer, event);
out_unlock:
- spin_unlock_irqrestore(&trace_buf_lock, irq_flags);
+ __raw_spin_unlock(&trace_buf_lock);
+ raw_local_irq_restore(irq_flags);
unpause_graph_tracing();
out:
preempt_enable_notrace();
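
The trace.c conversions above all follow one pattern, consolidated
below: raw spinlocks bypass lockdep entirely, so taking one inside the
tracer cannot fire lock_acquire/lock_release events and recurse back
into the trace buffer. Raw spinlocks also do not mask interrupts on
their own, hence the explicit raw_local_irq_save/restore. A sketch of
the pattern (example_lock and the function name are made up):

static raw_spinlock_t example_lock = __RAW_SPIN_LOCK_UNLOCKED;

static void tracer_internal_update(void)
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* raw locks don't mask irqs */
	__raw_spin_lock(&example_lock);	/* invisible to lockdep */
	/* ... touch tracer-internal state ... */
	__raw_spin_unlock(&example_lock);
	raw_local_irq_restore(flags);
}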
*
* static void ftrace_event_<call>(proto)
* {
- * event_trace_printk(_RET_IP_, "(<call>) " <fmt>);
+ * event_trace_printk(_RET_IP_, "<call>: " <fmt>);
* }
*
* static int ftrace_reg_event_<call>(void)
#define _TRACE_FORMAT(call, proto, args, fmt) \
static void ftrace_event_##call(proto) \
{ \
- event_trace_printk(_RET_IP_, "(" #call ") " fmt); \
+ event_trace_printk(_RET_IP_, #call ": " fmt); \
} \
\
static int ftrace_reg_event_##call(void) \
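
The net effect of this last hunk on the rendered trace, using the
mmap_sem event from the example at the top (old format reconstructed
for comparison):

before:  /* (lock_acquire) try read &mm->mmap_sem */
after:   /* lock_acquire: try read &mm->mmap_sem */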