net: sxgbe: add Checksum offload support for Samsung sxgbe
Author:    Vipul Pandya <vipul.pandya@samsung.com>
           Tue, 25 Mar 2014 19:11:02 +0000 (12:11 -0700)
Committer: David S. Miller <davem@davemloft.net>
           Wed, 26 Mar 2014 20:49:31 +0000 (16:49 -0400)
This patch adds TX and RX checksum offload support.

Signed-off-by: Vipul Pandya <vipul.pandya@samsung.com>
Neatening-by: Joe Perches <joe@perches.com>
Signed-off-by: Byungho An <bh74.an@samsung.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c

index 7293c4c14be819fc2cd8bd5cc93ed13fa8d4f1c7..4893cfd298a7d5088471360338d9fd7e58b7f457 100644 (file)
@@ -340,6 +340,10 @@ struct sxgbe_core_ops {
        void (*set_eee_timer)(void __iomem *ioaddr, const int ls,
                              const int tw);
        void (*set_eee_pls)(void __iomem *ioaddr, const int link);
+
+       /* Enable disable checksum offload operations */
+       void (*enable_rx_csum)(void __iomem *ioaddr);
+       void (*disable_rx_csum)(void __iomem *ioaddr);
 };
 
 const struct sxgbe_core_ops *sxgbe_get_core_ops(void);
@@ -452,6 +456,7 @@ struct sxgbe_priv_data {
        struct sxgbe_ops *hw;   /* sxgbe specific ops */
        int no_csum_insertion;
        int irq;
+       int rxcsum_insertion;
        spinlock_t stats_lock;  /* lock for tx/rx statatics */
 
        struct phy_device *phydev;
index 01647901f55fe1f20da349e08c068d36998119cd..66d4a74a137c3ae8f61c5b0c6dac0512bc8d9e1e 100644 (file)
@@ -218,6 +218,24 @@ static void  sxgbe_set_eee_timer(void __iomem *ioaddr,
        writel(value, ioaddr + SXGBE_CORE_LPI_TIMER_CTRL);
 }
 
+static void sxgbe_enable_rx_csum(void __iomem *ioaddr)
+{
+       u32 ctrl;
+
+       ctrl = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG);
+       ctrl |= SXGBE_RX_CSUMOFFLOAD_ENABLE;
+       writel(ctrl, ioaddr + SXGBE_CORE_RX_CONFIG_REG);
+}
+
+static void sxgbe_disable_rx_csum(void __iomem *ioaddr)
+{
+       u32 ctrl;
+
+       ctrl = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG);
+       ctrl &= ~SXGBE_RX_CSUMOFFLOAD_ENABLE;
+       writel(ctrl, ioaddr + SXGBE_CORE_RX_CONFIG_REG);
+}
+
 const struct sxgbe_core_ops core_ops = {
        .core_init              = sxgbe_core_init,
        .dump_regs              = sxgbe_core_dump_regs,
@@ -234,6 +252,8 @@ const struct sxgbe_core_ops core_ops = {
        .reset_eee_mode         = sxgbe_reset_eee_mode,
        .set_eee_timer          = sxgbe_set_eee_timer,
        .set_eee_pls            = sxgbe_set_eee_pls,
+       .enable_rx_csum         = sxgbe_enable_rx_csum,
+       .disable_rx_csum        = sxgbe_disable_rx_csum,
 };
 
 const struct sxgbe_core_ops *sxgbe_get_core_ops(void)
index 6d44b9faf64e606adcda48295671d26ee5a0a3cb..838cb9fb0ea979514bafc7b068cd4c09f3ee5d49 100644 (file)
@@ -113,7 +113,7 @@ struct sxgbe_rx_norm_desc {
                        /* WB RDES3 */
                        u32 pkt_len:14;
                        u32 rdes3_reserved:1;
-                       u32 err_summary:15;
+                       u32 err_summary:1;
                        u32 err_l2_type:4;
                        u32 layer34_pkt_type:4;
                        u32 no_coagulation_pkt:1;
index dc0249bfa03bd6a3d4e2544ee84f27beac602cea..96b4b2c25b1608e48343233f87e29c7f1296fdb6 100644 (file)
@@ -1252,6 +1252,7 @@ void sxgbe_tso_prepare(struct sxgbe_priv_data *priv,
 static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        unsigned int entry, frag_num;
+       int cksum_flag = 0;
        struct netdev_queue *dev_txq;
        unsigned txq_index = skb_get_queue_mapping(skb);
        struct sxgbe_priv_data *priv = netdev_priv(dev);
@@ -1332,7 +1333,7 @@ static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
                                           __func__);
 
                        priv->hw->desc->prepare_tx_desc(tx_desc, 1, no_pagedlen,
-                                                       no_pagedlen, 0);
+                                                       no_pagedlen, cksum_flag);
                }
        }
 
@@ -1350,7 +1351,7 @@ static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
 
                /* prepare the descriptor */
                priv->hw->desc->prepare_tx_desc(tx_desc, 0, len,
-                                               len, 0);
+                                               len, cksum_flag);
                /* memory barrier to flush descriptor */
                wmb();
 
@@ -1471,6 +1472,8 @@ static int sxgbe_rx(struct sxgbe_priv_data *priv, int limit)
        unsigned int entry = priv->rxq[qnum]->cur_rx;
        unsigned int next_entry = 0;
        unsigned int count = 0;
+       int checksum;
+       int status;
 
        while (count < limit) {
                struct sxgbe_rx_norm_desc *p;
@@ -1487,7 +1490,18 @@ static int sxgbe_rx(struct sxgbe_priv_data *priv, int limit)
                next_entry = (++priv->rxq[qnum]->cur_rx) % rxsize;
                prefetch(priv->rxq[qnum]->dma_rx + next_entry);
 
-               /*TO DO read the status of the incoming frame */
+               /* Read the status of the incoming frame and also get checksum
+                * value based on whether it is enabled in SXGBE hardware or
+                * not.
+                */
+               status = priv->hw->desc->rx_wbstatus(p, &priv->xstats,
+                                                    &checksum);
+               if (unlikely(status < 0)) {
+                       entry = next_entry;
+                       continue;
+               }
+               if (unlikely(!priv->rxcsum_insertion))
+                       checksum = CHECKSUM_NONE;
 
                skb = priv->rxq[qnum]->rx_skbuff[entry];
 
@@ -1501,7 +1515,11 @@ static int sxgbe_rx(struct sxgbe_priv_data *priv, int limit)
 
                skb_put(skb, frame_len);
 
-               netif_receive_skb(skb);
+               skb->ip_summed = checksum;
+               if (checksum == CHECKSUM_NONE)
+                       netif_receive_skb(skb);
+               else
+                       napi_gro_receive(&priv->napi, skb);
 
                entry = next_entry;
        }
@@ -1748,15 +1766,15 @@ static int sxgbe_set_features(struct net_device *dev,
 {
        struct sxgbe_priv_data *priv = netdev_priv(dev);
        netdev_features_t changed = dev->features ^ features;
-       u32 ctrl;
 
        if (changed & NETIF_F_RXCSUM) {
-               ctrl = readl(priv->ioaddr + SXGBE_CORE_RX_CONFIG_REG);
-               if (features & NETIF_F_RXCSUM)
-                       ctrl |= SXGBE_RX_CSUMOFFLOAD_ENABLE;
-               else
-                       ctrl &= ~SXGBE_RX_CSUMOFFLOAD_ENABLE;
-               writel(ctrl, priv->ioaddr + SXGBE_CORE_RX_CONFIG_REG);
+               if (features & NETIF_F_RXCSUM) {
+                       priv->hw->mac->enable_rx_csum(priv->ioaddr);
+                       priv->rxcsum_insertion = true;
+               } else {
+                       priv->hw->mac->disable_rx_csum(priv->ioaddr);
+                       priv->rxcsum_insertion = false;
+               }
        }
 
        return 0;
@@ -2115,6 +2133,12 @@ struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
                }
        }
 
+       /* Enable Rx checksum offload */
+       if (priv->hw_cap.rx_csum_offload) {
+               priv->hw->mac->enable_rx_csum(priv->ioaddr);
+               priv->rxcsum_insertion = true;
+       }
+
        /* Rx Watchdog is available, enable depend on platform data */
        if (!priv->plat->riwt_off) {
                priv->use_riwt = 1;