From: Stanislaw Gruszka
Date: Fri, 25 Mar 2011 01:21:51 +0000 (+0000)
Subject: myri10ge: small rx_done refactoring
X-Git-Tag: MMI-PSA29.97-13-9~20061^2~15
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=b3cd965739b8677f6e04913aa535fa156b09e73d;p=GitHub%2FMotorolaMobilityLLC%2Fkernel-slsi.git

myri10ge: small rx_done refactoring

Avoid a theoretical race condition when accessing the NETIF_F_LRO flag
in dev->features, illustrated below.

CPU1                                    CPU2

myri10ge_clean_rx_done():               myri10ge_set_flags():
                                        or myri10ge_set_rx_csum():

if (dev->features & NETIF_F_LRO)
        setup lro

                                        dev->features |= NETIF_F_LRO
                                        or
                                        dev->features &= ~NETIF_F_LRO;

if (dev->features & NETIF_F_LRO)
        flush lro

Along the way, reduce the number of arguments to myri10ge_rx_done() and
the number of calls to it by moving the mgp->small_bytes check into that
function. That reduces the code size from:

   text    data     bss     dec     hex filename
  36644     248     100   36992    9080 drivers/net/myri10ge/myri10ge.o

to:

   text    data     bss     dec     hex filename
  36037     247     100   36384    8e20 drivers/net/myri10ge/myri10ge.o

on my i686 system, which should also make myri10ge_clean_rx_done()
faster.

Signed-off-by: Stanislaw Gruszka
Signed-off-by: David S. Miller
---

diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 1f4e8680a96a..673dc600c891 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -1312,17 +1312,26 @@ myri10ge_unmap_rx_page(struct pci_dev *pdev,
  * page into an skb */
 static inline int
-myri10ge_rx_done(struct myri10ge_slice_state *ss, struct myri10ge_rx_buf *rx,
-                 int bytes, int len, __wsum csum)
+myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum,
+                 int lro_enabled)
 {
         struct myri10ge_priv *mgp = ss->mgp;
         struct sk_buff *skb;
         struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME];
-        int i, idx, hlen, remainder;
+        struct myri10ge_rx_buf *rx;
+        int i, idx, hlen, remainder, bytes;
         struct pci_dev *pdev = mgp->pdev;
         struct net_device *dev = mgp->dev;
         u8 *va;
 
+        if (len <= mgp->small_bytes) {
+                rx = &ss->rx_small;
+                bytes = mgp->small_bytes;
+        } else {
+                rx = &ss->rx_big;
+                bytes = mgp->big_bytes;
+        }
+
         len += MXGEFW_PAD;
         idx = rx->cnt & rx->mask;
         va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
@@ -1341,7 +1350,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, struct myri10ge_rx_buf *rx,
                 remainder -= MYRI10GE_ALLOC_SIZE;
         }
 
-        if (dev->features & NETIF_F_LRO) {
+        if (lro_enabled) {
                 rx_frags[0].page_offset += MXGEFW_PAD;
                 rx_frags[0].size -= MXGEFW_PAD;
                 len -= MXGEFW_PAD;
@@ -1463,7 +1472,7 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
 {
         struct myri10ge_rx_done *rx_done = &ss->rx_done;
         struct myri10ge_priv *mgp = ss->mgp;
-        struct net_device *netdev = mgp->dev;
+
         unsigned long rx_bytes = 0;
         unsigned long rx_packets = 0;
         unsigned long rx_ok;
@@ -1474,18 +1483,18 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
         u16 length;
         __wsum checksum;
 
+        /*
+         * Prevent compiler from generating more than one ->features memory
+         * access to avoid theoretical race condition with functions that
+         * change NETIF_F_LRO flag at runtime.
+         */
+        bool lro_enabled = ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO;
+
         while (rx_done->entry[idx].length != 0 && work_done < budget) {
                 length = ntohs(rx_done->entry[idx].length);
                 rx_done->entry[idx].length = 0;
                 checksum = csum_unfold(rx_done->entry[idx].checksum);
-                if (length <= mgp->small_bytes)
-                        rx_ok = myri10ge_rx_done(ss, &ss->rx_small,
-                                                 mgp->small_bytes,
-                                                 length, checksum);
-                else
-                        rx_ok = myri10ge_rx_done(ss, &ss->rx_big,
-                                                 mgp->big_bytes,
-                                                 length, checksum);
+                rx_ok = myri10ge_rx_done(ss, length, checksum, lro_enabled);
                 rx_packets += rx_ok;
                 rx_bytes += rx_ok * (unsigned long)length;
                 cnt++;
@@ -1497,7 +1506,7 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
         ss->stats.rx_packets += rx_packets;
         ss->stats.rx_bytes += rx_bytes;
 
-        if (netdev->features & NETIF_F_LRO)
+        if (lro_enabled)
                 lro_flush_all(&rx_done->lro_mgr);
 
         /* restock receive rings if needed */
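
For reference, the snapshot pattern the patch introduces can be illustrated
outside the kernel. The sketch below is hypothetical user-space code, not
driver code: FAKE_NETIF_F_LRO, READ_ONCE_U32, fake_features and
clean_rx_done_sketch are made-up stand-ins, and ACCESS_ONCE() is approximated
with a volatile read. The point is that the flags word is read once per poll
into a local, and both the per-packet LRO decision and the final flush test
that local, so the two checks can never disagree even if another CPU flips the
flag in between.

/* Minimal sketch, e.g.: cc -o lro_snapshot_sketch lro_snapshot_sketch.c */
#include <stdbool.h>
#include <stdio.h>

#define FAKE_NETIF_F_LRO  (1u << 0)    /* made-up stand-in for the real flag bit */
#define READ_ONCE_U32(x)  (*(volatile unsigned int *)&(x))

/* In the real race this word is flipped concurrently by the ethtool paths. */
static unsigned int fake_features = FAKE_NETIF_F_LRO;

static void clean_rx_done_sketch(void)
{
        /* Single read of the flags word, cached in a local for the whole poll. */
        bool lro_enabled = READ_ONCE_U32(fake_features) & FAKE_NETIF_F_LRO;

        if (lro_enabled)
                printf("per-packet path: set up LRO\n");

        /* ... packet processing loop would run here ... */

        if (lro_enabled)        /* always agrees with the check above */
                printf("end of poll: flush LRO\n");
}

int main(void)
{
        clean_rx_done_sketch();
        return 0;
}

No locking is needed for this pattern: a slightly stale flag value is
acceptable for one polling pass, it only has to be used consistently within
that pass.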