struct tg3_napi *tnapi;
struct netdev_queue *txq;
unsigned int last;
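+ /* Cache the IP/TCP header fields that the TSO path rewrites
+ * in place, so they can be restored if the packet is later
+ * handed back to GSO by the hwbug workaround
+ */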
+ struct iphdr *iph = NULL;
+ struct tcphdr *tcph = NULL;
+ __sum16 tcp_csum = 0, ip_csum = 0;
+ __be16 ip_tot_len = 0;
txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
tnapi = &tp->napi[skb_get_queue_mapping(skb)];
@@ ... @@
mss = skb_shinfo(skb)->gso_size;
if (mss) {
- struct iphdr *iph;
u32 tcp_opt_len, hdr_len;
if (skb_cow_head(skb, 0))
@@ ... @@
tg3_flag(tp, TSO_BUG))
return tg3_tso_bug(tp, skb);
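+ /* Save the original IP checksum and total length before
+ * they are overwritten for the hardware below
+ */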
+ ip_csum = iph->check;
+ ip_tot_len = iph->tot_len;
iph->check = 0;
iph->tot_len = htons(mss + hdr_len);
}
base_flags |= (TXD_FLAG_CPU_PRE_DMA |
TXD_FLAG_CPU_POST_DMA);
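+ /* Likewise save the TCP checksum before the TSO setup
+ * below clobbers it
+ */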
+ tcph = tcp_hdr(skb);
+ tcp_csum = tcph->check;
+
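+ /* Chips with a hardware TSO engine recompute the TCP
+ * checksum themselves; the older firmware TSO path instead
+ * expects a pseudo-header checksum seed
+ */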
if (tg3_flag(tp, HW_TSO_1) ||
tg3_flag(tp, HW_TSO_2) ||
tg3_flag(tp, HW_TSO_3)) {
- tcp_hdr(skb)->check = 0;
+ tcph->check = 0;
base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
- } else
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
+ } else {
+ tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+ 0, IPPROTO_TCP, 0);
+ }
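+ /* HW_TSO_3 chips take the header length packed into the
+ * upper bits of the mss field of the descriptor
+ */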
if (tg3_flag(tp, HW_TSO_3)) {
mss |= (hdr_len & 0xc) << 12;
@@ ... @@
if (would_hit_hwbug) {
tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
+ if (mss) {
+ /* If it's a TSO packet, do GSO instead of
+ * allocating and copying to a large linear SKB
+ */
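+ /* ip_tot_len is only nonzero when the IPv4 header was
+ * rewritten above; IPv6 TSO skips that step, so restore the
+ * IP fields conditionally
+ */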
+ if (ip_tot_len) {
+ iph->check = ip_csum;
+ iph->tot_len = ip_tot_len;
+ }
+ tcph->check = tcp_csum;
+ return tg3_tso_bug(tp, skb);
+ }
+
/* If the workaround fails due to memory/mapping
* failure, silently drop this packet.
*/