bpf: add a test case for syscalls/sys_{enter|exit}_* tracepoints
author: Yonghong Song <yhs@fb.com>
Fri, 4 Aug 2017 23:00:10 +0000 (16:00 -0700)
committer: David S. Miller <davem@davemloft.net>
Mon, 7 Aug 2017 21:09:48 +0000 (14:09 -0700)
Signed-off-by: Yonghong Song <yhs@fb.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
samples/bpf/Makefile
samples/bpf/syscall_tp_kern.c [new file with mode: 0644]
samples/bpf/syscall_tp_user.c [new file with mode: 0644]

index 770d46cdf9f49dad61ad64e653f1b311a0f4217e..f1010fe759fec7d5929d834ee3500777f648c77f 100644 (file)
@@ -39,6 +39,7 @@ hostprogs-y += per_socket_stats_example
 hostprogs-y += load_sock_ops
 hostprogs-y += xdp_redirect
 hostprogs-y += xdp_redirect_map
+hostprogs-y += syscall_tp
 
 # Libbpf dependencies
 LIBBPF := ../../tools/lib/bpf/bpf.o
@@ -82,6 +83,7 @@ test_map_in_map-objs := bpf_load.o $(LIBBPF) test_map_in_map_user.o
 per_socket_stats_example-objs := $(LIBBPF) cookie_uid_helper_example.o
 xdp_redirect-objs := bpf_load.o $(LIBBPF) xdp_redirect_user.o
 xdp_redirect_map-objs := bpf_load.o $(LIBBPF) xdp_redirect_map_user.o
+syscall_tp-objs := bpf_load.o $(LIBBPF) syscall_tp_user.o
 
 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
@@ -125,6 +127,7 @@ always += tcp_iw_kern.o
 always += tcp_clamp_kern.o
 always += xdp_redirect_kern.o
 always += xdp_redirect_map_kern.o
+always += syscall_tp_kern.o
 
 HOSTCFLAGS += -I$(objtree)/usr/include
 HOSTCFLAGS += -I$(srctree)/tools/lib/
@@ -163,6 +166,7 @@ HOSTLOADLIBES_xdp_tx_iptunnel += -lelf
 HOSTLOADLIBES_test_map_in_map += -lelf
 HOSTLOADLIBES_xdp_redirect += -lelf
 HOSTLOADLIBES_xdp_redirect_map += -lelf
+HOSTLOADLIBES_syscall_tp += -lelf
 
 # Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline:
 #  make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang
diff --git a/samples/bpf/syscall_tp_kern.c b/samples/bpf/syscall_tp_kern.c
new file mode 100644 (file)
index 0000000..9149c52
--- /dev/null
@@ -0,0 +1,62 @@
+/* Copyright (c) 2017 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <uapi/linux/bpf.h>
+#include "bpf_helpers.h"
+
+/* Context layout for the syscalls:sys_enter_open tracepoint.
+ * NOTE(review): field order/widths are assumed to match
+ * /sys/kernel/debug/tracing/events/syscalls/sys_enter_open/format on the
+ * target kernel — confirm there. The leading 8 bytes stand in for the
+ * common trace-event header fields the program does not touch.
+ */
+struct syscalls_enter_open_args {
+       unsigned long long unused;
+       long syscall_nr;
+       long filename_ptr;
+       long flags;
+       long mode;
+};
+
+/* Context layout for the syscalls:sys_exit_open tracepoint (see the
+ * corresponding .../sys_exit_open/format file — NOTE(review): confirm on
+ * the target kernel). @ret carries open(2)'s return value.
+ */
+struct syscalls_exit_open_args {
+       unsigned long long unused;
+       long syscall_nr;
+       long ret;
+};
+
+/* Single-slot (key 0) u32 counter, bumped once per sys_enter_open event. */
+struct bpf_map_def SEC("maps") enter_open_map = {
+       .type = BPF_MAP_TYPE_ARRAY,
+       .key_size = sizeof(u32),
+       .value_size = sizeof(u32),
+       .max_entries = 1,
+};
+
+/* Single-slot (key 0) u32 counter, bumped once per sys_exit_open event. */
+struct bpf_map_def SEC("maps") exit_open_map = {
+       .type = BPF_MAP_TYPE_ARRAY,
+       .key_size = sizeof(u32),
+       .value_size = sizeof(u32),
+       .max_entries = 1,
+};
+
+/* Increment the u32 counter stored at key 0 of @map.
+ * NOTE(review): the lookup-then-increment is not atomic, so concurrent
+ * events on different CPUs can lose counts — fine for this smoke test,
+ * but __sync_fetch_and_add() would be needed for exact totals.
+ * The BPF_NOEXIST fallback handles a missing slot; for an ARRAY map the
+ * slot is presumably preallocated, so the lookup is expected to succeed.
+ */
+static __always_inline void count(void *map)
+{
+       u32 key = 0;
+       u32 *value, init_val = 1;
+
+       value = bpf_map_lookup_elem(map, &key);
+       if (value)
+               *value += 1;
+       else
+               bpf_map_update_elem(map, &key, &init_val, BPF_NOEXIST);
+}
+
+/* Attached to syscalls:sys_enter_open — counts each open(2) entry. */
+SEC("tracepoint/syscalls/sys_enter_open")
+int trace_enter_open(struct syscalls_enter_open_args *ctx)
+{
+       count((void *)&enter_open_map);
+       return 0;
+}
+
+/* Attached to syscalls:sys_exit_open — counts each open(2) return.
+ * NOTE(review): the name "trace_enter_exit" is misleading for the
+ * exit-side handler; "trace_exit_open" would read better.
+ */
+SEC("tracepoint/syscalls/sys_exit_open")
+int trace_enter_exit(struct syscalls_exit_open_args *ctx)
+{
+       count((void *)&exit_open_map);
+       return 0;
+}
diff --git a/samples/bpf/syscall_tp_user.c b/samples/bpf/syscall_tp_user.c
new file mode 100644 (file)
index 0000000..a3cb91e
--- /dev/null
@@ -0,0 +1,71 @@
+/* Copyright (c) 2017 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <stdio.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <linux/bpf.h>
+#include <string.h>
+#include <linux/perf_event.h>
+#include <errno.h>
+#include <assert.h>
+#include <stdbool.h>
+#include <sys/resource.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+
+/* This program verifies bpf attachment to tracepoint sys_enter_* and sys_exit_*.
+ * This requires kernel CONFIG_FTRACE_SYSCALLS to be set.
+ */
+
+/* Read the counter at key 0 of the given map and warn on stderr if the
+ * lookup fails or the value is still 0 (i.e. the attached bpf program
+ * never fired). Failures are reported but not fatal.
+ * NOTE(review): despite the name, @map_id is a map *file descriptor*
+ * (taken from bpf_load's map_fd[]), not a kernel map ID.
+ */
+static void verify_map(int map_id)
+{
+       __u32 key = 0;
+       __u32 val;
+
+       if (bpf_map_lookup_elem(map_id, &key, &val) != 0) {
+               fprintf(stderr, "map_lookup failed: %s\n", strerror(errno));
+               return;
+       }
+       if (val == 0)
+               fprintf(stderr, "failed: map #%d returns value 0\n", map_id);
+}
+
+/* Load syscall_tp_kern.o, trigger one open(2), then check that both
+ * tracepoint counters advanced. Returns 0 on success, 1 if loading the
+ * bpf object or the test open() fails; map verification only warns.
+ */
+int main(int argc, char **argv)
+{
+       struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
+       char filename[256];
+       int fd;
+
+       /* Lift the locked-memory limit so bpf map creation cannot fail
+        * with EPERM.
+        * NOTE(review): setrlimit()'s return value is ignored — a failure
+        * here would surface later as a confusing map-creation error.
+        */
+       setrlimit(RLIMIT_MEMLOCK, &r);
+       snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+       if (load_bpf_file(filename)) {
+               fprintf(stderr, "%s", bpf_log_buf);
+               return 1;
+       }
+
+       /* current load_bpf_file has perf_event_open default pid = -1
+        * and cpu = 0, which permits attached bpf execution on
+        * all cpus for all pid's. bpf program execution ignores
+        * cpu affinity.
+        */
+       /* trigger some "open" operations */
+       fd = open(filename, O_RDONLY);
+       if (fd < 0) {
+               fprintf(stderr, "open failed: %s\n", strerror(errno));
+               return 1;
+       }
+       close(fd);
+
+       /* verify the map: map_fd[0]/[1] are enter_open_map/exit_open_map,
+        * in the order the maps appear in syscall_tp_kern.c
+        */
+       verify_map(map_fd[0]);
+       verify_map(map_fd[1]);
+
+       return 0;
+}