int br_hook;
vg = nbp_vlan_group_rcu(to);
- skb = br_handle_vlan(to->br, vg, skb);
+ skb = br_handle_vlan(to->br, to, vg, skb);
if (!skb)
return;
#include <linux/export.h>
#include <linux/rculist.h>
#include "br_private.h"
+#include "br_private_tunnel.h"
/* Hook for brouter */
br_should_route_hook_t __rcu *br_should_route_hook __read_mostly;
indev = skb->dev;
skb->dev = brdev;
- skb = br_handle_vlan(br, vg, skb);
+ skb = br_handle_vlan(br, NULL, vg, skb);
if (!skb)
return NET_RX_DROP;
/* update the multicast stats if the packet is IGMP/MLD */
return RX_HANDLER_CONSUMED;
p = br_port_get_rcu(skb->dev);
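+ /* ports with BR_VLAN_TUNNEL set map incoming tunnel ids to vlans */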
+ if (p->flags & BR_VLAN_TUNNEL) {
+ if (br_handle_ingress_vlan_tunnel(skb, p,
+ nbp_vlan_group_rcu(p)))
+ goto drop;
+ }
if (unlikely(is_link_local_ether_addr(dest))) {
u16 fwd_mask = p->br->group_fwd_mask_required;
const struct sk_buff *skb);
bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid);
struct sk_buff *br_handle_vlan(struct net_bridge *br,
+ const struct net_bridge_port *port,
struct net_bridge_vlan_group *vg,
struct sk_buff *skb);
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags);
}
static inline struct sk_buff *br_handle_vlan(struct net_bridge *br,
+ const struct net_bridge_port *port,
struct net_bridge_vlan_group *vg,
struct sk_buff *skb)
{
	return skb;
}
void nbp_vlan_tunnel_info_flush(struct net_bridge_port *port);
void vlan_tunnel_info_del(struct net_bridge_vlan_group *vg,
struct net_bridge_vlan *vlan);
+int br_handle_ingress_vlan_tunnel(struct sk_buff *skb,
+ struct net_bridge_port *p,
+ struct net_bridge_vlan_group *vg);
+int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
+ struct net_bridge_vlan *vlan);
#else
static inline int vlan_tunnel_init(struct net_bridge_vlan_group *vg)
{
	return 0;
}

static inline void vlan_tunnel_info_del(struct net_bridge_vlan_group *vg,
					struct net_bridge_vlan *vlan)
{
}
+static inline int br_handle_ingress_vlan_tunnel(struct sk_buff *skb,
+ struct net_bridge_port *p,
+ struct net_bridge_vlan_group *vg)
+{
+ return 0;
+}
#endif
#endif
}
struct sk_buff *br_handle_vlan(struct net_bridge *br,
+ const struct net_bridge_port *p,
struct net_bridge_vlan_group *vg,
struct sk_buff *skb)
{
if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
skb->vlan_tci = 0;
+
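+ /* if this vlan is bound to a tunnel id on a BR_VLAN_TUNNEL port,
+  * replace the tag with tunnel dst metadata; a failure drops the skb
+  */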
+ if (p && (p->flags & BR_VLAN_TUNNEL) &&
+ br_handle_egress_vlan_tunnel(skb, v)) {
+ kfree_skb(skb);
+ return NULL;
+ }
out:
return skb;
}
.automatic_shrinking = true,
};
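+/* resolve a tunnel id to the net_bridge_vlan bound to it */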
+static struct net_bridge_vlan *br_vlan_tunnel_lookup(struct rhashtable *tbl,
+ u64 tunnel_id)
+{
+ return rhashtable_lookup_fast(tbl, &tunnel_id,
+ br_vlan_tunnel_rht_params);
+}
+
void vlan_tunnel_info_del(struct net_bridge_vlan_group *vg,
struct net_bridge_vlan *vlan)
{
void vlan_tunnel_deinit(struct net_bridge_vlan_group *vg)
{
	rhashtable_destroy(&vg->tunnel_hash);
}
+
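+/* ingress: tag an untagged skb that arrived with tunnel rx metadata
+ * with the vlan bound to its tunnel id
+ */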
+int br_handle_ingress_vlan_tunnel(struct sk_buff *skb,
+ struct net_bridge_port *p,
+ struct net_bridge_vlan_group *vg)
+{
+ struct ip_tunnel_info *tinfo = skb_tunnel_info(skb);
+ struct net_bridge_vlan *vlan;
+
+ if (!vg || !tinfo)
+ return 0;
+
+ /* if already tagged, ignore */
+ if (skb_vlan_tagged(skb))
+ return 0;
+
+ /* lookup vid, given tunnel id */
+ vlan = br_vlan_tunnel_lookup(&vg->tunnel_hash, tinfo->key.tun_id);
+ if (!vlan)
+ return 0;
+
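+ /* the mapping now lives in the vlan tag; the metadata dst can go */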
+ skb_dst_drop(skb);
+
+ __vlan_hwaccel_put_tag(skb, p->br->vlan_proto, vlan->vid);
+
+ return 0;
+}
+
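+/* egress: strip the vlan tag and attach the tunnel dst bound to this
+ * vlan so the egress tunnel device can encapsulate the frame
+ */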
+int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
+ struct net_bridge_vlan *vlan)
+{
+ int err;
+
+ if (!vlan || !vlan->tinfo.tunnel_id)
+ return 0;
+
+ if (unlikely(!skb_vlan_tag_present(skb)))
+ return 0;
+
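+ /* swap the old dst for this vlan's tunnel dst after popping the tag */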
+ skb_dst_drop(skb);
+ err = skb_vlan_pop(skb);
+ if (err)
+ return err;
+
+ skb_dst_set(skb, dst_clone(&vlan->tinfo.tunnel_dst->dst));
+
+ return 0;
+}