}
EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
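+/*
+ * Like trace_nowake_buffer_unlock_commit(), but the kernel stack trace is
+ * recorded from @regs rather than from the current frame, so callers such
+ * as the kprobe handlers can log the stack seen at the probed instruction.
+ */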
+void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+					    struct ring_buffer_event *event,
+					    unsigned long flags, int pc,
+					    struct pt_regs *regs)
+{
+	ring_buffer_unlock_commit(buffer, event);
+
+	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
+	ftrace_trace_userstack(buffer, flags, pc);
+}
+EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit_regs);
+
void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
{
#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
-				 int skip, int pc)
+				 int skip, int pc, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	trace.skip = skip;
	trace.entries = entry->caller;
-	save_stack_trace(&trace);
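+	/*
+	 * Use the caller-supplied registers when available so the stack
+	 * trace reflects the probed/interrupted context rather than the
+	 * tracer's own call chain.
+	 */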
+	if (regs)
+		save_stack_trace_regs(regs, &trace);
+	else
+		save_stack_trace(&trace);
	if (!filter_check_discard(call, entry, buffer, event))
		ring_buffer_unlock_commit(buffer, event);
}
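+/*
+ * Like ftrace_trace_stack(), but walk the stack described by @regs
+ * instead of the current frame.
+ */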
+void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
+			     int skip, int pc, struct pt_regs *regs)
+{
+	if (!(trace_flags & TRACE_ITER_STACKTRACE))
+		return;
+
+	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
+}
+
void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;
-	__ftrace_trace_stack(buffer, flags, skip, pc);
+	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
}
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
-	__ftrace_trace_stack(tr->buffer, flags, skip, pc);
+	__ftrace_trace_stack(tr->buffer, flags, skip, pc, NULL);
}
/**
	local_save_flags(flags);
	/* skipping 3 traces, seems to get us at the caller of this function */
-	__ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
+	__ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count(), NULL);
}
static DEFINE_PER_CPU(int, user_stack_count);
void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc);
+void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
+			     int skip, int pc, struct pt_regs *regs);
+
void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
			    int pc);
{
}
+static inline void ftrace_trace_stack_regs(struct ring_buffer *buffer,
+					   unsigned long flags, int skip,
+					   int pc, struct pt_regs *regs)
+{
+}
+
static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
					  unsigned long flags, int pc)
{
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
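+	/* Hand the probe's pt_regs to the commit so stack traces use them */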
	if (!filter_current_check_discard(buffer, call, entry, event))
-		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
+		trace_nowake_buffer_unlock_commit_regs(buffer, event,
+						       irq_flags, pc, regs);
}
/* Kretprobe handler */
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
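+	/* Likewise for the return probe: stack traces come from @regs */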
	if (!filter_current_check_discard(buffer, call, entry, event))
-		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
+		trace_nowake_buffer_unlock_commit_regs(buffer, event,
+						       irq_flags, pc, regs);
}
/* Event entry printers */