perf, bpf: minimize the size of perf_trace_*() tracepoint handler
author    Alexei Starovoitov <ast@fb.com>
          Tue, 19 Apr 2016 03:11:50 +0000 (20:11 -0700)
committer David S. Miller <davem@davemloft.net>
          Thu, 21 Apr 2016 17:48:20 +0000 (13:48 -0400)
move trace_call_bpf() into a helper function to minimize the size
of the perf_trace_*() tracepoint handlers.
    text    data     bss     dec      hex filename
10541679 5526646 2945024 19013349 1221ee5 vmlinux_before
10509422 5526646 2945024 18981092 121a0e4 vmlinux_after
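
As a rough illustration of where the savings come from, here is a minimal
user-space sketch of the technique (not the kernel's TRACE_EVENT machinery;
all names below are made up): the macro body is stamped out once per
tracepoint, so any code kept in it is duplicated in every generated handler,
while an out-of-line helper is emitted only once.

/*
 * Sketch only: a macro stamps out many handlers, so code in the macro
 * body is duplicated per handler.  Moving the common tail into one
 * noinline helper keeps each generated handler small.
 */
#include <stdio.h>

/* shared out-of-line helper: emitted once, regardless of handler count */
__attribute__((noinline))
static void submit_common(const char *name, int value)
{
        /* stands in for the bpf-prog check and perf_tp_event() submission */
        printf("%s: %d\n", name, value);
}

/* each expansion now only marshals its arguments and calls the helper */
#define DEFINE_HANDLER(name)                            \
        static void handler_##name(int value)           \
        {                                               \
                submit_common(#name, value);            \
        }

DEFINE_HANDLER(sched_switch)
DEFINE_HANDLER(net_dev_xmit)
DEFINE_HANDLER(kmalloc)

int main(void)
{
        handler_sched_switch(1);
        handler_net_dev_xmit(2);
        handler_kmalloc(3);
        return 0;
}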

It may seem that perf_fetch_caller_regs() could also be moved into the
helper, but that would be incorrect: the captured ip/sp would then
describe the helper's frame instead of the tracepoint call site.
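
A rough user-space analogy (GCC builtins standing in for the kernel's
perf_fetch_caller_regs(); the function names are invented): a register
snapshot taken one call level too deep describes the helper's frame,
not the call site the sample is supposed to attribute.

#include <stdio.h>

__attribute__((noinline))
static void snapshot_in_helper(void)
{
        /* return address here points into tracepoint_handler(): one frame too deep */
        printf("helper view:  ip=%p sp=%p\n",
               __builtin_return_address(0), __builtin_frame_address(0));
}

__attribute__((noinline))
static void tracepoint_handler(void)
{
        /* return address here points into main(), the intended "call site" */
        printf("handler view: ip=%p sp=%p\n",
               __builtin_return_address(0), __builtin_frame_address(0));
        snapshot_in_helper();
}

int main(void)
{
        tracepoint_handler();
        return 0;
}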

bpf+tracepoint performance is not affected, since
perf_swevent_put_recursion_context() is now inlined into the new helper.
Its EXPORT_SYMBOL_GPL can also be dropped, since the macro-generated
handlers no longer call it directly.

No measurable change in normal perf tracepoints.

Suggested-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/linux/trace_events.h
include/trace/perf.h
kernel/events/core.c

diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index fe6441203b59f4f3a1459e31cdb334c6b925816e..222f6aa0418f8877746fba79090ab790ab23e03a 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -609,6 +609,11 @@ extern void ftrace_profile_free_filter(struct perf_event *event);
 void perf_trace_buf_update(void *record, u16 type);
 void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);
 
+void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
+                              struct trace_event_call *call, u64 count,
+                              struct pt_regs *regs, struct hlist_head *head,
+                              struct task_struct *task);
+
 static inline void
 perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
                       u64 count, struct pt_regs *regs, void *head,
diff --git a/include/trace/perf.h b/include/trace/perf.h
index a182306eefd7a825d1f40483dbc1d206ea3ef59a..88de5c205e86f7040854221baa2fe04c737daa3a 100644
--- a/include/trace/perf.h
+++ b/include/trace/perf.h
@@ -64,16 +64,9 @@ perf_trace_##call(void *__data, proto)                                       \
                                                                        \
        { assign; }                                                     \
                                                                        \
-       if (prog) {                                                     \
-               *(struct pt_regs **)entry = __regs;                     \
-               if (!trace_call_bpf(prog, entry) || hlist_empty(head)) { \
-                       perf_swevent_put_recursion_context(rctx);       \
-                       return;                                         \
-               }                                                       \
-       }                                                               \
-       perf_trace_buf_submit(entry, __entry_size, rctx,                \
-                             event_call->event.type, __count, __regs,  \
-                             head, __task);                            \
+       perf_trace_run_bpf_submit(entry, __entry_size, rctx,            \
+                                 event_call, __count, __regs,          \
+                                 head, __task);                        \
 }
 
 /*
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5056abffef2775cfd3f607e9ed270eb949cca47e..9eb23dc27462f86ee4382dda64870352dcf17922 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6741,7 +6741,6 @@ void perf_swevent_put_recursion_context(int rctx)
 
        put_recursion_context(swhash->recursion, rctx);
 }
-EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context);
 
 void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 {
@@ -6998,6 +6997,25 @@ static int perf_tp_event_match(struct perf_event *event,
        return 1;
 }
 
+void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
+                              struct trace_event_call *call, u64 count,
+                              struct pt_regs *regs, struct hlist_head *head,
+                              struct task_struct *task)
+{
+       struct bpf_prog *prog = call->prog;
+
+       if (prog) {
+               *(struct pt_regs **)raw_data = regs;
+               if (!trace_call_bpf(prog, raw_data) || hlist_empty(head)) {
+                       perf_swevent_put_recursion_context(rctx);
+                       return;
+               }
+       }
+       perf_tp_event(call->event.type, count, raw_data, size, regs, head,
+                     rctx, task);
+}
+EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit);
+
 void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
                   struct pt_regs *regs, struct hlist_head *head, int rctx,
                   struct task_struct *task)
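
For context only (the user-space side as I understand it, not part of this
patch): call->prog checked above becomes non-NULL when a BPF program is
attached to a tracepoint perf event.  A minimal sketch, assuming a BPF
program fd obtained elsewhere via bpf(BPF_PROG_LOAD) and a tracepoint id
read from /sys/kernel/debug/tracing/events/<group>/<event>/id; error
handling and the program itself are omitted.

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int attach_bpf_to_tracepoint(int tracepoint_id, int bpf_prog_fd)
{
        struct perf_event_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_TRACEPOINT;
        attr.size = sizeof(attr);
        attr.config = tracepoint_id;    /* id read from tracefs */
        attr.sample_period = 1;

        fd = syscall(__NR_perf_event_open, &attr,
                     -1 /* pid: all tasks */, 0 /* cpu */, -1 /* group_fd */, 0);
        if (fd < 0)
                return -1;

        /* associate the program with the event, then enable it */
        if (ioctl(fd, PERF_EVENT_IOC_SET_BPF, bpf_prog_fd) < 0 ||
            ioctl(fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
                close(fd);
                return -1;
        }
        return fd;
}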