	return -EIO;
}
-static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
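+/*
+ * Pad a short frame out to the 60 byte Ethernet minimum (ETH_ZLEN).
+ * Returns false when the skb could not be expanded; skb_padto has
+ * already freed the skb in that case, so the caller must not touch it.
+ */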
+static bool rtl_skb_pad(struct sk_buff *skb)
+{
+	if (skb_padto(skb, ETH_ZLEN))
+		return false;
+	skb_put(skb, ETH_ZLEN - skb->len);
+	return true;
+}
+
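+/*
+ * 8168evl (RTL_GIGA_MAC_VER_34) does not automatically pad frames to
+ * the minimum Ethernet length, so short frames must be padded by the
+ * driver before being handed to the hardware.
+ */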
+static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb)
+{
+	return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
+}
+
+static inline bool rtl8169_tso_csum(struct rtl8169_private *tp,
				    struct sk_buff *skb, u32 *opts)
{
	const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const struct iphdr *ip = ip_hdr(skb);

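+		/*
+		 * Hardware Tx checksumming does not work together with the
+		 * software padding workaround on this chip, so compute the
+		 * checksum in software before padding the frame.
+		 */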
+		if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
+			return skb_checksum_help(skb) == 0 && rtl_skb_pad(skb);
+
		if (ip->protocol == IPPROTO_TCP)
			opts[offset] |= info->checksum.tcp;
		else if (ip->protocol == IPPROTO_UDP)
			opts[offset] |= info->checksum.udp;
		else
			WARN_ON_ONCE(1);
+	} else {
+		if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
+			return rtl_skb_pad(skb);
	}
+
+	return true;
}
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
		goto err_stop_0;
	}
-	/* 8168evl does not automatically pad to minimum length. */
-	if (unlikely(tp->mac_version == RTL_GIGA_MAC_VER_34 &&
-		     skb->len < ETH_ZLEN)) {
-		if (skb_padto(skb, ETH_ZLEN))
-			goto err_update_stats;
-		skb_put(skb, ETH_ZLEN - skb->len);
-	}
-
	if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
		goto err_stop_0;

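+	/*
+	 * The checksum/padding workaround can fail, so set up the
+	 * descriptor options and run it before anything is DMA-mapped;
+	 * bailing out to err_update_stats then needs no unmapping.
+	 */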
+	opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb));
+	opts[0] = DescOwn;
+
+	if (!rtl8169_tso_csum(tp, skb, opts))
+		goto err_update_stats;
+
	len = skb_headlen(skb);
	mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(d, mapping))) {
	tp->tx_skb[entry].len = len;
	txd->addr = cpu_to_le64(mapping);
-	opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb));
-	opts[0] = DescOwn;
-
-	rtl8169_tso_csum(tp, skb, opts);
-
	frags = rtl8169_xmit_frags(tp, skb, opts);
	if (frags < 0)
		goto err_dma_1;