net: cls_bpf: add hardware offload
authorJakub Kicinski <jakub.kicinski@netronome.com>
Wed, 21 Sep 2016 10:43:53 +0000 (11:43 +0100)
committerDavid S. Miller <davem@davemloft.net>
Wed, 21 Sep 2016 23:50:02 +0000 (19:50 -0400)
This patch adds hardware offload capability to the cls_bpf classifier,
similar to what has been done with U32 and flower.

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/linux/netdevice.h
include/net/pkt_cls.h
net/sched/cls_bpf.c

index a10d8d18ce1999d82db45747e468daef9df44fbd..69f242c71865fef48afeca7079ea9d9e45a4b468 100644 (file)
@@ -789,6 +789,7 @@ enum {
        TC_SETUP_CLSU32,
        TC_SETUP_CLSFLOWER,
        TC_SETUP_MATCHALL,
+       TC_SETUP_CLSBPF,
 };
 
 struct tc_cls_u32_offload;
@@ -800,6 +801,7 @@ struct tc_to_netdev {
                struct tc_cls_u32_offload *cls_u32;
                struct tc_cls_flower_offload *cls_flower;
                struct tc_cls_matchall_offload *cls_mall;
+               struct tc_cls_bpf_offload *cls_bpf;
        };
 };
 
index a459be5fe1c2a937d92253d4f6bfe8e762acd84f..41e8071dff87044adc1c24f15079e5f73296afae 100644 (file)
@@ -486,4 +486,18 @@ struct tc_cls_matchall_offload {
        unsigned long cookie;
 };
 
+/* Commands passed to the driver via ndo_setup_tc() for cls_bpf offload. */
+enum tc_clsbpf_command {
+       TC_CLSBPF_ADD,          /* install program in hardware */
+       TC_CLSBPF_REPLACE,      /* swap out a program already in hardware */
+       TC_CLSBPF_DESTROY,      /* remove program from hardware */
+};
+
+/* Offload descriptor handed to the driver through tc_to_netdev.cls_bpf. */
+struct tc_cls_bpf_offload {
+       enum tc_clsbpf_command command;
+       struct tcf_exts *exts;          /* classifier actions */
+       struct bpf_prog *prog;          /* program to offload */
+       const char *name;               /* bpf_name of the filter, may be NULL */
+       bool exts_integrated;           /* direct-action mode flag */
+};
+
 #endif
index c6f7a47541ebfb20cf10d159fbbf0f51eeb648da..6523c5b4c0a5d504de3b21d48eb50969796baa90 100644 (file)
@@ -39,6 +39,7 @@ struct cls_bpf_prog {
        struct list_head link;
        struct tcf_result res;
        bool exts_integrated;
+       bool offloaded;
        struct tcf_exts exts;
        u32 handle;
        union {
@@ -138,6 +139,71 @@ static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
        return !prog->bpf_ops;
 }
 
+/* Send one cls_bpf offload command for @prog to the device backing tp's
+ * qdisc.  Builds a struct tc_cls_bpf_offload from the filter state and
+ * forwards it via ndo_setup_tc(); returns the driver's result (0 on
+ * success, negative errno otherwise).  Callers are expected to check
+ * tc_should_offload() first where appropriate.
+ */
+static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
+                              enum tc_clsbpf_command cmd)
+{
+       struct net_device *dev = tp->q->dev_queue->dev;
+       struct tc_cls_bpf_offload bpf_offload = {};
+       struct tc_to_netdev offload;
+
+       offload.type = TC_SETUP_CLSBPF;
+       offload.cls_bpf = &bpf_offload;
+
+       bpf_offload.command = cmd;
+       bpf_offload.exts = &prog->exts;
+       bpf_offload.prog = prog->filter;
+       bpf_offload.name = prog->bpf_name;
+       bpf_offload.exts_integrated = prog->exts_integrated;
+
+       return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+                                            tp->protocol, &offload);
+}
+
+/* Reconcile hardware offload state when @prog is added or replaces
+ * @oldprog.  If the old filter is currently offloaded we either REPLACE
+ * it (device still willing to offload) or DESTROY the old hardware entry
+ * (device no longer willing); otherwise we attempt an ADD if the device
+ * accepts offloads.  Failures from the driver are silently ignored here —
+ * the filter continues to work in the software path — and the
+ * ->offloaded flags are only updated on success.
+ */
+static void cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
+                           struct cls_bpf_prog *oldprog)
+{
+       struct net_device *dev = tp->q->dev_queue->dev;
+       struct cls_bpf_prog *obj = prog;
+       enum tc_clsbpf_command cmd;
+
+       if (oldprog && oldprog->offloaded) {
+               if (tc_should_offload(dev, tp, 0)) {
+                       cmd = TC_CLSBPF_REPLACE;
+               } else {
+                       /* device stopped accepting offloads: tear down the
+                        * old hardware entry instead of installing prog
+                        */
+                       obj = oldprog;
+                       cmd = TC_CLSBPF_DESTROY;
+               }
+       } else {
+               if (!tc_should_offload(dev, tp, 0))
+                       return;
+               cmd = TC_CLSBPF_ADD;
+       }
+
+       if (cls_bpf_offload_cmd(tp, obj, cmd))
+               return;
+
+       /* note: after DESTROY this marks oldprog offloaded then clears it
+        * again below, leaving both flags false as intended
+        */
+       obj->offloaded = true;
+       if (oldprog)
+               oldprog->offloaded = false;
+}
+
+/* Remove @prog from hardware if it is currently offloaded.  Called on
+ * filter delete/destroy.  A driver failure is only logged — the software
+ * teardown proceeds regardless — and ->offloaded is left set so the
+ * inconsistency remains visible.
+ */
+static void cls_bpf_stop_offload(struct tcf_proto *tp,
+                                struct cls_bpf_prog *prog)
+{
+       int err;
+
+       if (!prog->offloaded)
+               return;
+
+       err = cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
+       if (err) {
+               pr_err("Stopping hardware offload failed: %d\n", err);
+               return;
+       }
+
+       prog->offloaded = false;
+}
+
 static int cls_bpf_init(struct tcf_proto *tp)
 {
        struct cls_bpf_head *head;
@@ -177,6 +243,7 @@ static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
 {
        struct cls_bpf_prog *prog = (struct cls_bpf_prog *) arg;
 
+       cls_bpf_stop_offload(tp, prog);
        list_del_rcu(&prog->link);
        tcf_unbind_filter(tp, &prog->res);
        call_rcu(&prog->rcu, __cls_bpf_delete_prog);
@@ -193,6 +260,7 @@ static bool cls_bpf_destroy(struct tcf_proto *tp, bool force)
                return false;
 
        list_for_each_entry_safe(prog, tmp, &head->plist, link) {
+               cls_bpf_stop_offload(tp, prog);
                list_del_rcu(&prog->link);
                tcf_unbind_filter(tp, &prog->res);
                call_rcu(&prog->rcu, __cls_bpf_delete_prog);
@@ -415,6 +483,8 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
        if (ret < 0)
                goto errout;
 
+       cls_bpf_offload(tp, prog, oldprog);
+
        if (oldprog) {
                list_replace_rcu(&oldprog->link, &prog->link);
                tcf_unbind_filter(tp, &oldprog->res);