From: Auke Kok
Date: Tue, 23 May 2006 17:29:41 +0000 (-0700)
Subject: ixgb: fix rare early tso completion
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=96f9c2e277768099479fbed7c3b69c294b1fadef;p=GitHub%2FLineageOS%2FG12%2Fandroid_kernel_amlogic_linux-4.9.git

ixgb: fix rare early tso completion

Fix a rare premature descriptor write-back (early completion) when using
TSO. This is essentially the e1000 fix, with code that was mostly already
written. One extra transmit descriptor per skb is also reserved for the
workaround.

Signed-off-by: Jesse Brandeburg
Signed-off-by: Auke Kok
Signed-off-by: John Ronciak
---

diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index cfd67d812f0d..13181c4f1d20 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1266,6 +1266,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
 	struct ixgb_buffer *buffer_info;
 	int len = skb->len;
 	unsigned int offset = 0, size, count = 0, i;
+	unsigned int mss = skb_shinfo(skb)->tso_size;
 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
 	unsigned int f;
 
@@ -1277,6 +1278,11 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
 	while(len) {
 		buffer_info = &tx_ring->buffer_info[i];
 		size = min(len, IXGB_MAX_JUMBO_FRAME_SIZE);
+		/* Workaround for premature desc write-backs
+		 * in TSO mode.  Append 4-byte sentinel desc */
+		if(unlikely(mss && !nr_frags && size == len && size > 8))
+			size -= 4;
+
 		buffer_info->length = size;
 		buffer_info->dma =
 			pci_map_single(adapter->pdev,
@@ -1301,6 +1307,12 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
 		while(len) {
 			buffer_info = &tx_ring->buffer_info[i];
 			size = min(len, IXGB_MAX_JUMBO_FRAME_SIZE);
+			/* Workaround for premature desc write-backs
+			 * in TSO mode.  Append 4-byte sentinel desc */
+			if(unlikely(mss && (f == (nr_frags-1)) && (size == len)
+			            && (size > 8)))
+				size -= 4;
+
 			buffer_info->length = size;
 			buffer_info->dma =
 				pci_map_page(adapter->pdev,
@@ -1378,7 +1390,8 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
 #define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
 			 (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
 #define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) + \
-	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1
+	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 \
+	/* one more for TSO workaround */ + 1
 
 static int
 ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
@@ -1416,7 +1429,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		return NETDEV_TX_OK;
 	}
 
-	if (tso)
+	if (likely(tso))
 		tx_flags |= IXGB_TX_FLAGS_TSO;
 	else if(ixgb_tx_csum(adapter, skb))
 		tx_flags |= IXGB_TX_FLAGS_CSUM;
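
Note on the workaround itself: the heart of the change is simply trimming the
last data buffer of a TSO packet by four bytes so the hardware writes back a
small sentinel descriptor instead of completing the packet prematurely. The
following is a minimal, self-contained C sketch of that length calculation
only; tso_trim_last_buffer and its parameters are hypothetical names for
illustration and are not part of the driver.

#include <assert.h>
#include <stdbool.h>

/* Hypothetical helper mirroring the patch logic: trim the final buffer of a
 * TSO packet by 4 bytes so a tiny sentinel descriptor can follow it, which
 * avoids the premature descriptor write-back. */
static unsigned int tso_trim_last_buffer(unsigned int size, unsigned int len,
					 bool is_tso, bool is_last_buffer)
{
	/* Trim only when doing TSO, on the last buffer of the packet, and
	 * only if the buffer is large enough to give up 4 bytes. */
	if (is_tso && is_last_buffer && size == len && size > 8)
		return size - 4;
	return size;
}

int main(void)
{
	/* TSO, last buffer: 4 bytes are carved off for the sentinel. */
	assert(tso_trim_last_buffer(100, 100, true, true) == 96);
	/* Non-TSO traffic is left untouched. */
	assert(tso_trim_last_buffer(100, 100, false, true) == 100);
	/* Buffers of 8 bytes or fewer are never trimmed. */
	assert(tso_trim_last_buffer(8, 8, true, true) == 8);
	return 0;
}

The size > 8 guard mirrors the patch: a buffer that small cannot safely give
up four bytes, so it is mapped unchanged. This also explains the extra "+ 1"
added to DESC_NEEDED above, since the carved-off sentinel may consume one more
descriptor per transmit.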