if (unlikely(err))
return err;
- if (vlan_tx_tag_present(skb)) {
- skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
- vlan_tx_tag_get(skb));
- if (WARN_ON(!skb))
- return -ENOMEM;
-
- skb->vlan_tci = 0;
- }
+ skb = vlan_hwaccel_push_inside(skb);
+ if (WARN_ON(!skb))
+ return -ENOMEM;
vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
vxh->vx_flags = htonl(VXLAN_FLAGS);
if (unlikely(err))
return err;
- if (vlan_tx_tag_present(skb)) {
- skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
- vlan_tx_tag_get(skb));
- if (WARN_ON(!skb))
- return -ENOMEM;
-
- skb->vlan_tci = 0;
- }
+ skb = vlan_hwaccel_push_inside(skb);
+ if (WARN_ON(!skb))
+ return -ENOMEM;
vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
vxh->vx_flags = htonl(VXLAN_FLAGS);
return skb;
}
+/*
+ * __vlan_hwaccel_push_inside - pushes vlan tag to the payload
+ * @skb: skbuff to tag
+ *
+ * Pushes the VLAN tag from @skb->vlan_tci inside the payload.
+ *
+ * Following the skb_unshare() example, in case of error, the calling function
+ * doesn't have to worry about freeing the original skb.
+ */
+static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
+{
+ skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
+ vlan_tx_tag_get(skb));
+ if (likely(skb))
+ skb->vlan_tci = 0;
+ return skb;
+}
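
The leading-underscore variant skips the tag-presence check, so it is intended for callers that have already tested vlan_tx_tag_present(), typically together with a hardware-offload capability test, as the net/core conversions further down do. A minimal sketch of such a caller, assuming this patch is applied; my_soft_push_vlan() is an illustrative name, not part of the patch:

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

/* Sketch only: my_soft_push_vlan() is an illustrative name. */
static struct sk_buff *my_soft_push_vlan(struct sk_buff *skb,
					 netdev_features_t features)
{
	/* Insert the tag into the packet data only when the device cannot
	 * transmit it via hardware acceleration; on failure the helper
	 * returns NULL and the skb has already been freed.
	 */
	if (vlan_tx_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto))
		skb = __vlan_hwaccel_push_inside(skb);
	return skb;
}
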
+/*
+ * vlan_hwaccel_push_inside - pushes vlan tag to the payload
+ * @skb: skbuff to tag
+ *
+ * Checks if a VLAN tag is present in @skb->vlan_tci and if it is, pushes the
+ * VLAN tag from @skb->vlan_tci inside the payload.
+ *
+ * Following the skb_unshare() example, in case of error, the calling function
+ * doesn't have to worry about freeing the original skb.
+ */
+static inline struct sk_buff *vlan_hwaccel_push_inside(struct sk_buff *skb)
+{
+ if (vlan_tx_tag_present(skb))
+ skb = __vlan_hwaccel_push_inside(skb);
+ return skb;
+}
+
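
For tunnel transmit paths such as the vxlan and geneve conversions in this patch, the checked helper collapses the old open-coded block into a single call plus a NULL test; on failure the skb has already been freed, following the skb_unshare() convention, so the caller simply returns an error. A minimal sketch under that assumption; struct my_tunnel_hdr and my_tunnel_xmit_skb() are illustrative names only:

#include <linux/if_vlan.h>
#include <linux/skbuff.h>

/* Sketch only: struct my_tunnel_hdr and my_tunnel_xmit_skb() are illustrative. */
struct my_tunnel_hdr {
	__be32 flags;
};

static int my_tunnel_xmit_skb(struct sk_buff *skb)
{
	struct my_tunnel_hdr *hdr;

	/* Move a hardware-accelerated VLAN tag (if any) into the packet data
	 * and clear skb->vlan_tci; NULL means the skb was already freed.
	 */
	skb = vlan_hwaccel_push_inside(skb);
	if (unlikely(!skb))
		return -ENOMEM;		/* no kfree_skb() needed here */

	hdr = (struct my_tunnel_hdr *)__skb_push(skb, sizeof(*hdr));
	hdr->flags = cpu_to_be32(0);
	/* ... fill in the rest of the outer header and hand off the skb ... */
	return 0;
}
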
/**
* __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting
* @skb: skbuff to tag
netdev_features_t features)
{
if (vlan_tx_tag_present(skb) &&
- !vlan_hw_offload_capable(features, skb->vlan_proto)) {
- skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
- vlan_tx_tag_get(skb));
- if (skb)
- skb->vlan_tci = 0;
- }
+ !vlan_hw_offload_capable(features, skb->vlan_proto))
+ skb = __vlan_hwaccel_push_inside(skb);
return skb;
}
if (vlan_tx_tag_present(skb) &&
!vlan_hw_offload_capable(features, skb->vlan_proto)) {
- skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
- vlan_tx_tag_get(skb));
+ skb = __vlan_hwaccel_push_inside(skb);
if (unlikely(!skb)) {
/* This is actually a packet drop, but we
 * don't want the code that calls this
 * function to try and operate on a NULL skb.
 */
goto out;
}
- skb->vlan_tci = 0;
}
status = netdev_start_xmit(skb, dev, txq, false);
if (unlikely(err))
return err;
- if (vlan_tx_tag_present(skb)) {
- skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
- vlan_tx_tag_get(skb));
- if (unlikely(!skb))
- return -ENOMEM;
-
- skb->vlan_tci = 0;
- }
+ skb = vlan_hwaccel_push_inside(skb);
+ if (unlikely(!skb))
+ return -ENOMEM;
gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
if (!nskb)
return -ENOMEM;
- nskb = vlan_insert_tag_set_proto(nskb, nskb->vlan_proto,
- vlan_tx_tag_get(nskb));
+ nskb = __vlan_hwaccel_push_inside(nskb);
if (!nskb)
return -ENOMEM;
- nskb->vlan_tci = 0;
skb = nskb;
}
goto err_free_rt;
}
- if (vlan_tx_tag_present(skb)) {
- skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
- vlan_tx_tag_get(skb));
- if (unlikely(!skb)) {
- err = -ENOMEM;
- goto err_free_rt;
- }
- skb->vlan_tci = 0;
+ skb = vlan_hwaccel_push_inside(skb);
+ if (unlikely(!skb)) {
+ err = -ENOMEM;
+ goto err_free_rt;
}
/* Push Tunnel header. */