bpf: Introduce bpf_map ID
Author:     Martin KaFai Lau <kafai@fb.com>
AuthorDate: Mon, 5 Jun 2017 19:15:47 +0000 (12:15 -0700)
Commit:     David S. Miller <davem@davemloft.net>
CommitDate: Tue, 6 Jun 2017 19:41:22 +0000 (15:41 -0400)
This patch generates a unique ID for each created bpf_map.
The approach is similar to the earlier patch that introduced the
bpf_prog ID.

It is worth noting that the bpf_map's ID and the bpf_prog's ID are in
two independent ID spaces, and both have the same valid range:
[1, INT_MAX).
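
For illustration only (not part of this patch): a minimal, runnable
userspace sketch of the two independent cyclic ID spaces described
above. The struct id_space and id_alloc() names are hypothetical; the
sketch models only the cursor behavior of idr_alloc_cyclic() and does
not track in-use IDs or report -ENOSPC when the space is exhausted.

#include <limits.h>
#include <stdio.h>

/* Toy model, not kernel code: each object class keeps its own cyclic
 * cursor, so a bpf_prog and a bpf_map may carry the same numeric ID.
 */
struct id_space {
        int next;       /* next candidate ID, cycles within [1, INT_MAX) */
};

static int id_alloc(struct id_space *s)
{
        int id = s->next;

        /* idr_alloc_cyclic()-style wrap: INT_MAX itself is never used */
        s->next = (id >= INT_MAX - 1) ? 1 : id + 1;
        return id;
}

int main(void)
{
        struct id_space prog_ids = { .next = 1 };
        struct id_space map_ids = { .next = 1 };

        /* Independent spaces: both first allocations yield ID 1. */
        printf("first prog id: %d, first map id: %d\n",
               id_alloc(&prog_ids), id_alloc(&map_ids));
        return 0;
}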

Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Acked-by: Alexei Starovoitov <ast@fb.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/linux/bpf.h
kernel/bpf/syscall.c

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index c5946d19f2caf0e50aa146d7557a7338d85de231..c32bace66d3daeef542dde8e21d5b516d796a694 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -46,6 +46,7 @@ struct bpf_map {
        u32 max_entries;
        u32 map_flags;
        u32 pages;
+       u32 id;
        struct user_struct *user;
        const struct bpf_map_ops *ops;
        struct work_struct work;
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 2a1b32b470f186bdf18eb8e8ab0ed507ffb6754f..4c3075b5d8403e9aea47ca9b4b3f5fab25617257 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -27,6 +27,8 @@
 DEFINE_PER_CPU(int, bpf_prog_active);
 static DEFINE_IDR(prog_idr);
 static DEFINE_SPINLOCK(prog_idr_lock);
+static DEFINE_IDR(map_idr);
+static DEFINE_SPINLOCK(map_idr_lock);
 
 int sysctl_unprivileged_bpf_disabled __read_mostly;
 
@@ -117,6 +119,29 @@ static void bpf_map_uncharge_memlock(struct bpf_map *map)
        free_uid(user);
 }
 
+static int bpf_map_alloc_id(struct bpf_map *map)
+{
+       int id;
+
+       spin_lock_bh(&map_idr_lock);
+       id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
+       if (id > 0)
+               map->id = id;
+       spin_unlock_bh(&map_idr_lock);
+
+       if (WARN_ON_ONCE(!id))
+               return -ENOSPC;
+
+       return id > 0 ? 0 : id;
+}
+
+static void bpf_map_free_id(struct bpf_map *map)
+{
+       spin_lock_bh(&map_idr_lock);
+       idr_remove(&map_idr, map->id);
+       spin_unlock_bh(&map_idr_lock);
+}
+
 /* called from workqueue */
 static void bpf_map_free_deferred(struct work_struct *work)
 {
@@ -141,6 +166,7 @@ static void bpf_map_put_uref(struct bpf_map *map)
 void bpf_map_put(struct bpf_map *map)
 {
        if (atomic_dec_and_test(&map->refcnt)) {
+               bpf_map_free_id(map);
                INIT_WORK(&map->work, bpf_map_free_deferred);
                schedule_work(&map->work);
        }
@@ -239,14 +265,20 @@ static int map_create(union bpf_attr *attr)
        if (err)
                goto free_map_nouncharge;
 
+       err = bpf_map_alloc_id(map);
+       if (err)
+               goto free_map;
+
        err = bpf_map_new_fd(map);
        if (err < 0)
                /* failed to allocate fd */
-               goto free_map;
+               goto free_id;
 
        trace_bpf_map_create(map, err);
        return err;
 
+free_id:
+       bpf_map_free_id(map);
 free_map:
        bpf_map_uncharge_memlock(map);
 free_map_nouncharge:
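
For completeness, a hedged userspace sketch of how the new ID becomes
observable. It assumes the BPF_OBJ_GET_INFO_BY_FD command and struct
bpf_map_info, which are introduced later in this patch series; the
sys_bpf() wrapper is a local helper, and this patch itself only
assigns map->id.

#include <linux/bpf.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static long sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr)
{
        return syscall(__NR_bpf, cmd, attr, sizeof(*attr));
}

int main(void)
{
        union bpf_attr attr;
        struct bpf_map_info info;
        int fd;

        /* Create a 1-entry array map; map_create() assigns its ID. */
        memset(&attr, 0, sizeof(attr));
        attr.map_type = BPF_MAP_TYPE_ARRAY;
        attr.key_size = sizeof(__u32);
        attr.value_size = sizeof(__u64);
        attr.max_entries = 1;
        fd = sys_bpf(BPF_MAP_CREATE, &attr);
        if (fd < 0) {
                perror("BPF_MAP_CREATE");
                return 1;
        }

        /* Read the ID back through the map fd. */
        memset(&attr, 0, sizeof(attr));
        memset(&info, 0, sizeof(info));
        attr.info.bpf_fd = fd;
        attr.info.info_len = sizeof(info);
        attr.info.info = (__u64)(unsigned long)&info;
        if (sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr) < 0) {
                perror("BPF_OBJ_GET_INFO_BY_FD");
                return 1;
        }
        printf("map id: %u\n", info.id);
        return 0;
}

Note that map creation is privileged by default (see
sysctl_unprivileged_bpf_disabled above), so the sketch simply reports
failures via perror().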