bpf, perf: split bpf_perf_event_output
author Daniel Borkmann <daniel@iogearbox.net>
Thu, 14 Jul 2016 16:08:04 +0000 (18:08 +0200)
committer David S. Miller <davem@davemloft.net>
Fri, 15 Jul 2016 21:23:56 +0000 (14:23 -0700)
As a preparation, split the bpf_perf_event_output() helper into two
parts. The new bpf_perf_event_output() prepares the raw record itself
and checks for unknown flags from the BPF trace context, while
__bpf_perf_event_output() does the core work. The latter will later be
reused directly from bpf_event_output().
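
For context, a minimal BPF-program-side sketch of the user-facing
helper, which stays unchanged by this split. The map layout, event
struct and kprobe attach point below are illustrative assumptions,
not part of this patch:

  #include <linux/bpf.h>
  #include <linux/ptrace.h>
  #include <bpf/bpf_helpers.h>

  /* Perf event array the program pushes samples into; the slot of the
   * executing CPU is selected via BPF_F_CURRENT_CPU at output time.
   */
  struct {
          __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
          __uint(key_size, sizeof(int));
          __uint(value_size, sizeof(__u32));
  } events SEC(".maps");

  struct event {
          __u32 pid;
  };

  SEC("kprobe/do_sys_openat2")
  int probe_open(struct pt_regs *ctx)
  {
          struct event e = {
                  .pid = bpf_get_current_pid_tgid() >> 32,
          };

          /* Any flag bit outside BPF_F_INDEX_MASK makes the helper
           * return -EINVAL; that check stays in the new wrapper below.
           */
          return bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
                                       &e, sizeof(e));
  }

  char LICENSE[] SEC("license") = "GPL";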

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
kernel/trace/bpf_trace.c

index 35ab1b2b041b1d9b95847c32c1a59c7a2c66a9a6..c35883a9bc11a887e00599728408e7aa179493e1 100644
@@ -233,26 +233,17 @@ static const struct bpf_func_proto bpf_perf_event_read_proto = {
        .arg2_type      = ARG_ANYTHING,
 };
 
-static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
+static __always_inline u64
+__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
+                       u64 flags, struct perf_raw_record *raw)
 {
-       struct pt_regs *regs = (struct pt_regs *) (long) r1;
-       struct bpf_map *map = (struct bpf_map *) (long) r2;
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        unsigned int cpu = smp_processor_id();
        u64 index = flags & BPF_F_INDEX_MASK;
-       void *data = (void *) (long) r4;
        struct perf_sample_data sample_data;
        struct bpf_event_entry *ee;
        struct perf_event *event;
-       struct perf_raw_record raw = {
-               .frag = {
-                       .size = size,
-                       .data = data,
-               },
-       };
 
-       if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
-               return -EINVAL;
        if (index == BPF_F_CURRENT_CPU)
                index = cpu;
        if (unlikely(index >= array->map.max_entries))
@@ -271,11 +262,29 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
                return -EOPNOTSUPP;
 
        perf_sample_data_init(&sample_data, 0, 0);
-       sample_data.raw = &raw;
+       sample_data.raw = raw;
        perf_event_output(event, &sample_data, regs);
        return 0;
 }
 
+static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
+{
+       struct pt_regs *regs = (struct pt_regs *)(long) r1;
+       struct bpf_map *map  = (struct bpf_map *)(long) r2;
+       void *data = (void *)(long) r4;
+       struct perf_raw_record raw = {
+               .frag = {
+                       .size = size,
+                       .data = data,
+               },
+       };
+
+       if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
+               return -EINVAL;
+
+       return __bpf_perf_event_output(regs, map, flags, &raw);
+}
+
 static const struct bpf_func_proto bpf_perf_event_output_proto = {
        .func           = bpf_perf_event_output,
        .gpl_only       = true,
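
As an illustration of the intended reuse, a hedged sketch of a caller
in the style of the planned bpf_event_output(), building its own
perf_raw_record and handing the core work to __bpf_perf_event_output().
The function name, signature and per-CPU scratch pt_regs handling are
assumptions here, not taken from this patch:

  /* Hypothetical follow-up caller, assumed to live next to the
   * split-out helpers in kernel/trace/bpf_trace.c.
   */
  static DEFINE_PER_CPU(struct pt_regs, bpf_output_scratch_regs);

  u64 bpf_event_output_sketch(struct bpf_map *map, u64 flags,
                              void *meta, u64 meta_size)
  {
          struct pt_regs *regs = this_cpu_ptr(&bpf_output_scratch_regs);
          struct perf_raw_record raw = {
                  .frag = {
                          .size = meta_size,
                          .data = meta,
                  },
          };

          /* No tracing pt_regs are available in this context, so take
           * a register snapshot at the call site before emitting the
           * sample through the split-out core.
           */
          perf_fetch_caller_regs(regs);

          return __bpf_perf_event_output(regs, map, flags, &raw);
  }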