net_sched: use tcf_queue_work() in bpf filter
author    Cong Wang <xiyou.wangcong@gmail.com>
          Fri, 27 Oct 2017 01:24:30 +0000 (18:24 -0700)
committer David S. Miller <davem@davemloft.net>
          Sun, 29 Oct 2017 13:49:30 +0000 (22:49 +0900)
Defer the tcf_exts_destroy() call in the RCU callback to the tc filter
workqueue and take the RTNL lock there, so the actual destruction runs
in process context where the RTNL mutex can be acquired.

Reported-by: Chris Mi <chrism@mellanox.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Jiri Pirko <jiri@resnulli.us>
Cc: John Fastabend <john.fastabend@gmail.com>
Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/sched/cls_bpf.c

index 520c5027646aea5146f22d7898acfbc7dee3385a..037a3ae86829946135e2154bd83ddc722b61af17 100644
@@ -49,7 +49,10 @@ struct cls_bpf_prog {
        struct sock_filter *bpf_ops;
        const char *bpf_name;
        struct tcf_proto *tp;
-       struct rcu_head rcu;
+       union {
+               struct work_struct work;
+               struct rcu_head rcu;
+       };
 };
 
 static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
@@ -257,9 +260,21 @@ static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
        kfree(prog);
 }
 
+static void cls_bpf_delete_prog_work(struct work_struct *work)
+{
+       struct cls_bpf_prog *prog = container_of(work, struct cls_bpf_prog, work);
+
+       rtnl_lock();
+       __cls_bpf_delete_prog(prog);
+       rtnl_unlock();
+}
+
 static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu)
 {
-       __cls_bpf_delete_prog(container_of(rcu, struct cls_bpf_prog, rcu));
+       struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);
+
+       INIT_WORK(&prog->work, cls_bpf_delete_prog_work);
+       tcf_queue_work(&prog->work);
 }
 
 static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog)
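For reference, here is a minimal sketch of the same two-stage teardown
pattern, written against a hypothetical object type (struct foo and the
foo_* functions are illustrative names only, not part of this patch). It
assumes the void tcf_queue_work(struct work_struct *) helper introduced
earlier in this series. The RCU callback runs in softirq context where
sleeping is not allowed, so it only bounces the object to the tc filter
workqueue; the work handler runs in process context and may take the
RTNL mutex before freeing:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <net/pkt_cls.h>

struct foo {
	/* ... filter state ... */
	union {
		/* Only one deferral handle is live at a time: by the time
		 * the RCU callback runs, RCU is done with the rcu_head,
		 * so its storage can be reused for the work_struct.
		 */
		struct work_struct work;
		struct rcu_head rcu;
	};
};

static void foo_free(struct foo *f)
{
	/* Stand-in for __cls_bpf_delete_prog(): may sleep, wants RTNL. */
	kfree(f);
}

/* Stage 2: runs from the tc filter workqueue, in process context. */
static void foo_delete_work(struct work_struct *work)
{
	struct foo *f = container_of(work, struct foo, work);

	rtnl_lock();
	foo_free(f);
	rtnl_unlock();
}

/* Stage 1: RCU callback, atomic context -- only queue the work. */
static void foo_delete_rcu(struct rcu_head *rcu)
{
	struct foo *f = container_of(rcu, struct foo, rcu);

	INIT_WORK(&f->work, foo_delete_work);
	tcf_queue_work(&f->work);
}

/* Entry point: the caller (under RTNL) hands the object to RCU;
 * foo_delete_rcu() then runs after a grace period.
 */
static void foo_destroy(struct foo *f)
{
	call_rcu(&f->rcu, foo_delete_rcu);
}

This mirrors what the diff does for struct cls_bpf_prog:
cls_bpf_delete_prog_rcu() no longer frees the filter directly, it only
queues cls_bpf_delete_prog_work(), which takes RTNL before calling
__cls_bpf_delete_prog().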