bpf: sanitize bpf tracepoint access
authorAlexei Starovoitov <ast@fb.com>
Thu, 7 Apr 2016 01:43:28 +0000 (18:43 -0700)
committerDavid S. Miller <davem@davemloft.net>
Fri, 8 Apr 2016 01:04:26 +0000 (21:04 -0400)
During BPF program loading, remember the last byte of ctx access,
and at the time of attaching the program to a tracepoint, check that
the program doesn't access bytes beyond those defined in the tracepoint fields.

This also disallows access to __dynamic_array fields, but that
restriction can be relaxed in the future.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/linux/bpf.h
include/linux/trace_events.h
kernel/bpf/verifier.c
kernel/events/core.c
kernel/trace/trace_events.c

index 198f6ace70ecf8cc27b30a9f511f7bd35f55caac..b2365a6eba3dc86d5d0888a0a98d1077bd943ab0 100644 (file)
@@ -131,6 +131,7 @@ struct bpf_prog_type_list {
 struct bpf_prog_aux {
        atomic_t refcnt;
        u32 used_map_cnt;
+       u32 max_ctx_offset;
        const struct bpf_verifier_ops *ops;
        struct bpf_map **used_maps;
        struct bpf_prog *prog;
index 56f795e6a09300501ae80c440b29973439462929..fe6441203b59f4f3a1459e31cdb334c6b925816e 100644 (file)
@@ -569,6 +569,7 @@ extern int trace_define_field(struct trace_event_call *call, const char *type,
                              int is_signed, int filter_type);
 extern int trace_add_event_call(struct trace_event_call *call);
 extern int trace_remove_event_call(struct trace_event_call *call);
+extern int trace_event_get_offsets(struct trace_event_call *call);
 
 #define is_signed_type(type)   (((type)(-1)) < (type)1)
 
index 2e08f8e9b771f032a17e0305c60fcd6f0403e6f5..58792fed5678b5878dc0216567a883016f98de28 100644 (file)
@@ -652,8 +652,12 @@ static int check_ctx_access(struct verifier_env *env, int off, int size,
                            enum bpf_access_type t)
 {
        if (env->prog->aux->ops->is_valid_access &&
-           env->prog->aux->ops->is_valid_access(off, size, t))
+           env->prog->aux->ops->is_valid_access(off, size, t)) {
+               /* remember the offset of last byte accessed in ctx */
+               if (env->prog->aux->max_ctx_offset < off + size)
+                       env->prog->aux->max_ctx_offset = off + size;
                return 0;
+       }
 
        verbose("invalid bpf_context access off=%d size=%d\n", off, size);
        return -EACCES;
index e5ffe97d61662999c7f42b2e81ee944d820afe8c..9a01019ff7c83c78676b54af4447226660837d36 100644 (file)
@@ -7133,6 +7133,14 @@ static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
                return -EINVAL;
        }
 
+       if (is_tracepoint) {
+               int off = trace_event_get_offsets(event->tp_event);
+
+               if (prog->aux->max_ctx_offset > off) {
+                       bpf_prog_put(prog);
+                       return -EACCES;
+               }
+       }
        event->tp_event->prog = prog;
 
        return 0;
index 05ddc0820771eb7bc456aab9e2f0e5167ce7f023..ced963049e0aa4b04cdb4314443e48d71135fd44 100644 (file)
@@ -204,6 +204,24 @@ static void trace_destroy_fields(struct trace_event_call *call)
        }
 }
 
+/*
+ * Run-time version of trace_event_get_offsets_<call>(): returns one past
+ * the last accessible byte of the trace fields, excluding __dynamic_array
+ * data, so callers can bound fixed-field ctx accesses */
+int trace_event_get_offsets(struct trace_event_call *call)
+{
+       struct ftrace_event_field *tail;
+       struct list_head *head;
+
+       head = trace_get_fields(call);
+       /*
+        * head->next points to the field with the largest offset, since
+        * trace_define_field() added it to the front of the list last
+        */
+       tail = list_first_entry(head, struct ftrace_event_field, link);
+       return tail->offset + tail->size;
+}
+
 int trace_event_raw_init(struct trace_event_call *call)
 {
        int id;