bpf, trace: fetch current cpu only once
author	Daniel Borkmann <daniel@iogearbox.net>
Tue, 28 Jun 2016 10:18:24 +0000 (12:18 +0200)
committer	David S. Miller <davem@davemloft.net>
Thu, 30 Jun 2016 09:54:40 +0000 (05:54 -0400)
bpf_perf_event_output() currently fetches the current CPU id twice, once via
raw_smp_processor_id() and once via smp_processor_id(), which is unnecessary.
Fetch it only once and use the smp_processor_id() variant, so we also get
preemption checks along with it when DEBUG_PREEMPT is set.
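
For reference, a rough sketch of why the smp_processor_id() variant is the
better choice here (paraphrased from include/linux/smp.h, not the literal
kernel source): with CONFIG_DEBUG_PREEMPT the call is routed through a debug
helper that warns when the CPU id is read from preemptible context, whereas
raw_smp_processor_id() never performs that check.

    #ifdef CONFIG_DEBUG_PREEMPT
    /* debug build: complain if the caller may be preempted and migrated */
    # define smp_processor_id() debug_smp_processor_id()
    #else
    /* non-debug build: read the per-cpu id directly, no sanity check */
    # define smp_processor_id() raw_smp_processor_id()
    #endif

This helper is expected to run with preemption disabled, so the debug variant
does not change behavior and merely adds a sanity check in DEBUG_PREEMPT
builds.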

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
kernel/trace/bpf_trace.c

index 4e61f74a5d734373d995bf6b9b9789eac5b43960..505f9e9cdb3ba380d6bfc121fb4343375ff428d8 100644
@@ -233,6 +233,7 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
        struct pt_regs *regs = (struct pt_regs *) (long) r1;
        struct bpf_map *map = (struct bpf_map *) (long) r2;
        struct bpf_array *array = container_of(map, struct bpf_array, map);
+       unsigned int cpu = smp_processor_id();
        u64 index = flags & BPF_F_INDEX_MASK;
        void *data = (void *) (long) r4;
        struct perf_sample_data sample_data;
@@ -246,7 +247,7 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
        if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
                return -EINVAL;
        if (index == BPF_F_CURRENT_CPU)
-               index = raw_smp_processor_id();
+               index = cpu;
        if (unlikely(index >= array->map.max_entries))
                return -E2BIG;
 
@@ -259,7 +260,7 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
                     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
                return -EINVAL;
 
-       if (unlikely(event->oncpu != smp_processor_id()))
+       if (unlikely(event->oncpu != cpu))
                return -EOPNOTSUPP;
 
        perf_sample_data_init(&sample_data, 0, 0);