void (*set_eee_timer)(void __iomem *ioaddr, const int ls,
const int tw);
void (*set_eee_pls)(void __iomem *ioaddr, const int link);
+
+ /* Enable/disable Rx checksum offload operations */
+ void (*enable_rx_csum)(void __iomem *ioaddr);
+ void (*disable_rx_csum)(void __iomem *ioaddr);
};
const struct sxgbe_core_ops *sxgbe_get_core_ops(void);
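For reference, these ops reach the rest of the driver through the hw ops table. A minimal sketch of the binding, assuming the usual sxgbe init pattern (sxgbe_hw_init() and the allocation are illustrative here; only sxgbe_get_core_ops() comes from this header):

	static int sxgbe_hw_init(struct sxgbe_priv_data *priv)
	{
		/* illustrative: allocate the ops container and bind the
		 * core/MAC ops, including the new rx_csum callbacks
		 */
		priv->hw = kzalloc(sizeof(*priv->hw), GFP_KERNEL);
		if (!priv->hw)
			return -ENOMEM;

		priv->hw->mac = sxgbe_get_core_ops();
		return 0;
	}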
struct sxgbe_ops *hw; /* sxgbe specific ops */
int no_csum_insertion;
int irq;
+ int rxcsum_insertion;
spinlock_t stats_lock; /* lock for tx/rx statistics */
struct phy_device *phydev;
writel(value, ioaddr + SXGBE_CORE_LPI_TIMER_CTRL);
}
+static void sxgbe_enable_rx_csum(void __iomem *ioaddr)
+{
+ u32 ctrl;
+
+ ctrl = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG);
+ ctrl |= SXGBE_RX_CSUMOFFLOAD_ENABLE;
+ writel(ctrl, ioaddr + SXGBE_CORE_RX_CONFIG_REG);
+}
+
+static void sxgbe_disable_rx_csum(void __iomem *ioaddr)
+{
+ u32 ctrl;
+
+ ctrl = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG);
+ ctrl &= ~SXGBE_RX_CSUMOFFLOAD_ENABLE;
+ writel(ctrl, ioaddr + SXGBE_CORE_RX_CONFIG_REG);
+}
+
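Both helpers are plain read-modify-write accesses to the core Rx configuration register; SXGBE_RX_CSUMOFFLOAD_ENABLE (defined elsewhere in the driver) masks the enable bit. The pair could also collapse into one helper taking a flag; a sketch, not part of the patch:

	static void sxgbe_set_rx_csum(void __iomem *ioaddr, bool enable)
	{
		/* same read-modify-write as the two helpers above, folded */
		u32 ctrl = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG);

		if (enable)
			ctrl |= SXGBE_RX_CSUMOFFLOAD_ENABLE;
		else
			ctrl &= ~SXGBE_RX_CSUMOFFLOAD_ENABLE;
		writel(ctrl, ioaddr + SXGBE_CORE_RX_CONFIG_REG);
	}

Keeping two entry points, as the patch does, matches the existing enable_/disable_ naming convention in sxgbe_core_ops.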
const struct sxgbe_core_ops core_ops = {
.core_init = sxgbe_core_init,
.dump_regs = sxgbe_core_dump_regs,
.reset_eee_mode = sxgbe_reset_eee_mode,
.set_eee_timer = sxgbe_set_eee_timer,
.set_eee_pls = sxgbe_set_eee_pls,
+ .enable_rx_csum = sxgbe_enable_rx_csum,
+ .disable_rx_csum = sxgbe_disable_rx_csum,
};
const struct sxgbe_core_ops *sxgbe_get_core_ops(void)
static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned int entry, frag_num;
+ int cksum_flag = 0;
struct netdev_queue *dev_txq;
unsigned txq_index = skb_get_queue_mapping(skb);
struct sxgbe_priv_data *priv = netdev_priv(dev);
__func__);
priv->hw->desc->prepare_tx_desc(tx_desc, 1, no_pagedlen,
- no_pagedlen, 0);
+ no_pagedlen, cksum_flag);
}
}
/* prepare the descriptor */
priv->hw->desc->prepare_tx_desc(tx_desc, 0, len,
- len, 0);
+ len, cksum_flag);
/* memory barrier to flush descriptor */
wmb();
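Note that the TX hunks only thread cksum_flag through to prepare_tx_desc(); the code that sets it to something other than 0 is not part of this excerpt. Purely as a hypothetical illustration, drivers commonly derive such a flag from skb->ip_summed:

	/* hypothetical derivation, not from this patch: request hardware
	 * checksum insertion only when the stack deferred it to us
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL && !priv->no_csum_insertion)
		cksum_flag = 1;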
unsigned int entry = priv->rxq[qnum]->cur_rx;
unsigned int next_entry = 0;
unsigned int count = 0;
+ int checksum;
+ int status;
while (count < limit) {
struct sxgbe_rx_norm_desc *p;
next_entry = (++priv->rxq[qnum]->cur_rx) % rxsize;
prefetch(priv->rxq[qnum]->dma_rx + next_entry);
- /*TO DO read the status of the incoming frame */
+ /* Read the write-back status of the incoming frame and fetch the
+ * checksum result, which reflects whether checksum offload is
+ * enabled in the SXGBE hardware.
+ */
+ status = priv->hw->desc->rx_wbstatus(p, &priv->xstats,
+ &checksum);
+ if (unlikely(status < 0)) {
+ entry = next_entry;
+ continue;
+ }
+ if (unlikely(!priv->rxcsum_insertion))
+ checksum = CHECKSUM_NONE;
skb = priv->rxq[qnum]->rx_skbuff[entry];
skb_put(skb, frame_len);
- netif_receive_skb(skb);
+ skb->ip_summed = checksum;
+ if (checksum == CHECKSUM_NONE)
+ netif_receive_skb(skb);
+ else
+ napi_gro_receive(&priv->napi, skb);
entry = next_entry;
}
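Here checksum is expected to carry one of the CHECKSUM_* values from <linux/skbuff.h>: CHECKSUM_UNNECESSARY when the hardware verified the frame, CHECKSUM_NONE otherwise. That is why only the former path goes through napi_gro_receive(): GRO can then coalesce segments without re-verifying checksums in software. A sketch of the contract rx_wbstatus() is assumed to honour; the descriptor bits below are invented for illustration:

	struct rx_wb_bits {		/* hypothetical write-back fields */
		u32 err_summary:1;	/* frame had an error, drop it */
		u32 csum_ok:1;		/* hardware validated the checksum */
	};

	static int rx_wbstatus_sketch(const struct rx_wb_bits *wb,
				      int *checksum)
	{
		*checksum = wb->csum_ok ? CHECKSUM_UNNECESSARY
					: CHECKSUM_NONE;

		return wb->err_summary ? -EINVAL : 0; /* <0 skips the frame */
	}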
{
struct sxgbe_priv_data *priv = netdev_priv(dev);
netdev_features_t changed = dev->features ^ features;
- u32 ctrl;
if (changed & NETIF_F_RXCSUM) {
- ctrl = readl(priv->ioaddr + SXGBE_CORE_RX_CONFIG_REG);
- if (features & NETIF_F_RXCSUM)
- ctrl |= SXGBE_RX_CSUMOFFLOAD_ENABLE;
- else
- ctrl &= ~SXGBE_RX_CSUMOFFLOAD_ENABLE;
- writel(ctrl, priv->ioaddr + SXGBE_CORE_RX_CONFIG_REG);
+ if (features & NETIF_F_RXCSUM) {
+ priv->hw->mac->enable_rx_csum(priv->ioaddr);
+ priv->rxcsum_insertion = true;
+ } else {
+ priv->hw->mac->disable_rx_csum(priv->ioaddr);
+ priv->rxcsum_insertion = false;
+ }
}
return 0;
}
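For sxgbe_set_features() to ever see a NETIF_F_RXCSUM toggle, the feature has to be advertised and the handler wired into the device ops. A sketch of the standard netdev pattern (sxgbe_advertise_rxcsum() is a hypothetical helper; only sxgbe_set_features itself is from the patch):

	static const struct net_device_ops sxgbe_netdev_ops = {
		/* ...other callbacks elided... */
		.ndo_set_features = sxgbe_set_features,
	};

	static void sxgbe_advertise_rxcsum(struct net_device *ndev)
	{
		ndev->hw_features |= NETIF_F_RXCSUM; /* user-toggleable */
		ndev->features |= NETIF_F_RXCSUM;    /* on by default */
	}

Userspace then reaches the handler via ethtool -K <iface> rx on|off, which goes through netdev_update_features().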
}
+ /* Enable Rx checksum offload */
+ if (priv->hw_cap.rx_csum_offload) {
+ priv->hw->mac->enable_rx_csum(priv->ioaddr);
+ priv->rxcsum_insertion = true;
+ }
+
/* Rx Watchdog is available, enable it depending on platform data */
if (!priv->plat->riwt_off) {
priv->use_riwt = 1;