#define UPROBE_EVENT_SYSTEM "uprobes"
+struct uprobe_trace_entry_head {
+ struct trace_entry ent;
+ unsigned long vaddr[];
+};
+
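+/*
+ * An entry probe records a single address in vaddr[0]; SIZEOF_TRACE_ENTRY()
+ * reserves a second slot for the return-probe case.  DATAOF_TRACE_ENTRY()
+ * points at the fetched argument data stored right after the vaddr[] slots.
+ */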
+#define SIZEOF_TRACE_ENTRY(is_return) \
+ (sizeof(struct uprobe_trace_entry_head) + \
+ sizeof(unsigned long) * (is_return ? 2 : 1))
+
+#define DATAOF_TRACE_ENTRY(entry, is_return) \
+ ((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
+
struct trace_uprobe_filter {
rwlock_t rwlock;
int nr_systemwide;
struct uprobe_trace_entry_head *entry;
struct ring_buffer_event *event;
struct ring_buffer *buffer;
- u8 *data;
+ void *data;
int size, i;
struct ftrace_event_call *call = &tu->call;
- size = sizeof(*entry) + tu->size;
-
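+ /* one vaddr slot (entry probe) plus room for the fetched arguments */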
+ size = SIZEOF_TRACE_ENTRY(false) + tu->size;
event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
size, 0, 0);
if (!event)
return 0;
entry = ring_buffer_event_data(event);
- entry->ip = instruction_pointer(regs);
- data = (u8 *)&entry[1];
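+ /* vaddr[0] records the probed instruction address; arguments follow the header */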
+ entry->vaddr[0] = instruction_pointer(regs);
+ data = DATAOF_TRACE_ENTRY(entry, false);
for (i = 0; i < tu->nr_args; i++)
call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset);
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
- struct uprobe_trace_entry_head *field;
+ struct uprobe_trace_entry_head *entry;
struct trace_seq *s = &iter->seq;
struct trace_uprobe *tu;
u8 *data;
int i;
- field = (struct uprobe_trace_entry_head *)iter->ent;
+ entry = (struct uprobe_trace_entry_head *)iter->ent;
tu = container_of(event, struct trace_uprobe, call.event);
- if (!trace_seq_printf(s, "%s: (0x%lx)", tu->call.name, field->ip))
+ if (!trace_seq_printf(s, "%s: (0x%lx)", tu->call.name, entry->vaddr[0]))
goto partial;
- data = (u8 *)&field[1];
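+ /* fetched argument values were stored right after the fixed entry header */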
+ data = DATAOF_TRACE_ENTRY(entry, false);
for (i = 0; i < tu->nr_args; i++) {
if (!tu->args[i].type->print(s, tu->args[i].name,
- data + tu->args[i].offset, field))
+ data + tu->args[i].offset, entry))
goto partial;
}
static int uprobe_event_define_fields(struct ftrace_event_call *event_call)
{
- int ret, i;
+ int ret, i, size;
struct uprobe_trace_entry_head field;
- struct trace_uprobe *tu = (struct trace_uprobe *)event_call->data;
+ struct trace_uprobe *tu = event_call->data;
- DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
+ DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
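+ /* argument field offsets are relative to the end of the fixed-size entry header */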
+ size = SIZEOF_TRACE_ENTRY(false);
/* Set argument names as fields */
for (i = 0; i < tu->nr_args; i++) {
ret = trace_define_field(event_call, tu->args[i].type->fmttype,
tu->args[i].name,
- sizeof(field) + tu->args[i].offset,
+ size + tu->args[i].offset,
tu->args[i].type->size,
tu->args[i].type->is_signed,
FILTER_OTHER);
struct ftrace_event_call *call = &tu->call;
struct uprobe_trace_entry_head *entry;
struct hlist_head *head;
- u8 *data;
- int size, __size, i;
- int rctx;
+ unsigned long ip;
+ void *data;
+ int size, rctx, i;
if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
return UPROBE_HANDLER_REMOVE;
- __size = sizeof(*entry) + tu->size;
- size = ALIGN(__size + sizeof(u32), sizeof(u64));
- size -= sizeof(u32);
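+ /* round up so the record plus perf's u32 size header stays u64-aligned */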
+ size = SIZEOF_TRACE_ENTRY(false);
+ size = ALIGN(size + tu->size + sizeof(u32), sizeof(u64)) - sizeof(u32);
if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
return 0;
preempt_disable();
-
entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
if (!entry)
goto out;
- entry->ip = instruction_pointer(regs);
- data = (u8 *)&entry[1];
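+ /* the probed address is stored in vaddr[0] and also passed to perf_trace_buf_submit() */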
+ ip = instruction_pointer(regs);
+ entry->vaddr[0] = ip;
+ data = DATAOF_TRACE_ENTRY(entry, false);
for (i = 0; i < tu->nr_args; i++)
call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset);
head = this_cpu_ptr(call->perf_events);
- perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head, NULL);
-
+ perf_trace_buf_submit(entry, size, rctx, ip, 1, regs, head, NULL);
out:
preempt_enable();
return 0;
static
int trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type, void *data)
{
- struct trace_uprobe *tu = (struct trace_uprobe *)event->data;
+ struct trace_uprobe *tu = event->data;
switch (type) {
case TRACE_REG_REGISTER: