bpf: Add BPF_(PROG|MAP)_GET_NEXT_ID command
author Martin KaFai Lau <kafai@fb.com>
Mon, 5 Jun 2017 19:15:48 +0000 (12:15 -0700)
committer David S. Miller <davem@davemloft.net>
Tue, 6 Jun 2017 19:41:23 +0000 (15:41 -0400)
This patch adds BPF_PROG_GET_NEXT_ID and BPF_MAP_GET_NEXT_ID
to allow userspace to iterate all bpf_prog IDs and bpf_map IDs.

The API is kept consistent with the existing
BPF_MAP_GET_NEXT_KEY.

It is currently limited to CAP_SYS_ADMIN, a restriction we can
consider lifting in followup patches.

Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Acked-by: Alexei Starovoitov <ast@fb.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/uapi/linux/bpf.h
kernel/bpf/syscall.c

index e78aece036280292cac154f84349922420cd2800..629747a3f273d1d901fbafe71775537b28b40c1c 100644 (file)
@@ -82,6 +82,8 @@ enum bpf_cmd {
        BPF_PROG_ATTACH,
        BPF_PROG_DETACH,
        BPF_PROG_TEST_RUN,
+       BPF_PROG_GET_NEXT_ID,
+       BPF_MAP_GET_NEXT_ID,
 };
 
 enum bpf_map_type {
@@ -209,6 +211,11 @@ union bpf_attr {
                __u32           repeat;
                __u32           duration;
        } test;
+
+       struct { /* anonymous struct used by BPF_*_GET_NEXT_ID */
+               __u32           start_id;
+               __u32           next_id;
+       };
 } __attribute__((aligned(8)));
 
 /* BPF helper function descriptions:
index 4c3075b5d8403e9aea47ca9b4b3f5fab25617257..2405feedb8c1bf5c411be645f84046f5fac9eb80 100644 (file)
@@ -166,6 +166,7 @@ static void bpf_map_put_uref(struct bpf_map *map)
 void bpf_map_put(struct bpf_map *map)
 {
        if (atomic_dec_and_test(&map->refcnt)) {
+               /* bpf_map_free_id() must be called first */
                bpf_map_free_id(map);
                INIT_WORK(&map->work, bpf_map_free_deferred);
                schedule_work(&map->work);
@@ -726,6 +727,7 @@ void bpf_prog_put(struct bpf_prog *prog)
 {
        if (atomic_dec_and_test(&prog->aux->refcnt)) {
                trace_bpf_prog_put_rcu(prog);
+               /* bpf_prog_free_id() must be called first */
                bpf_prog_free_id(prog);
                bpf_prog_kallsyms_del(prog);
                call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
@@ -1069,6 +1071,34 @@ static int bpf_prog_test_run(const union bpf_attr *attr,
        return ret;
 }
 
+#define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
+
+static int bpf_obj_get_next_id(const union bpf_attr *attr,
+                              union bpf_attr __user *uattr,
+                              struct idr *idr,
+                              spinlock_t *lock)
+{
+       u32 next_id = attr->start_id;
+       int err = 0;
+
+       if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
+               return -EINVAL;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       next_id++;
+       spin_lock_bh(lock);
+       if (!idr_get_next(idr, &next_id))
+               err = -ENOENT;
+       spin_unlock_bh(lock);
+
+       if (!err)
+               err = put_user(next_id, &uattr->next_id);
+
+       return err;
+}
+
 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
 {
        union bpf_attr attr = {};
@@ -1146,6 +1176,14 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
        case BPF_PROG_TEST_RUN:
                err = bpf_prog_test_run(&attr, uattr);
                break;
+       case BPF_PROG_GET_NEXT_ID:
+               err = bpf_obj_get_next_id(&attr, uattr,
+                                         &prog_idr, &prog_idr_lock);
+               break;
+       case BPF_MAP_GET_NEXT_ID:
+               err = bpf_obj_get_next_id(&attr, uattr,
+                                         &map_idr, &map_idr_lock);
+               break;
        default:
                err = -EINVAL;
                break;