iowrite32(0, priv->base + FTGMAC100_OFFSET_MACCR);
}
-static bool ftgmac100_rxdes_first_segment(struct ftgmac100_rxdes *rxdes)
-{
- return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_FRS);
-}
-
-static bool ftgmac100_rxdes_last_segment(struct ftgmac100_rxdes *rxdes)
-{
- return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_LRS);
-}
-
-static bool ftgmac100_rxdes_packet_ready(struct ftgmac100_rxdes *rxdes)
-{
- return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RXPKT_RDY);
-}
-
-#define RXDES0_ANY_ERROR ( \
- FTGMAC100_RXDES0_RX_ERR | \
- FTGMAC100_RXDES0_CRC_ERR | \
- FTGMAC100_RXDES0_FTL | \
- FTGMAC100_RXDES0_RUNT | \
- FTGMAC100_RXDES0_RX_ODD_NB)
-
-static inline bool ftgmac100_rxdes_any_error(struct ftgmac100_rxdes *rxdes)
-{
- return rxdes->rxdes0 & cpu_to_le32(RXDES0_ANY_ERROR);
-}
-
-static bool ftgmac100_rxdes_rx_error(struct ftgmac100_rxdes *rxdes)
-{
- return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RX_ERR);
-}
-
-static bool ftgmac100_rxdes_crc_error(struct ftgmac100_rxdes *rxdes)
-{
- return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_CRC_ERR);
-}
-
-static bool ftgmac100_rxdes_frame_too_long(struct ftgmac100_rxdes *rxdes)
-{
- return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_FTL);
-}
-
-static bool ftgmac100_rxdes_runt(struct ftgmac100_rxdes *rxdes)
-{
- return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RUNT);
-}
-
-static bool ftgmac100_rxdes_odd_nibble(struct ftgmac100_rxdes *rxdes)
-{
- return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RX_ODD_NB);
-}
-
-static unsigned int ftgmac100_rxdes_data_length(struct ftgmac100_rxdes *rxdes)
-{
- return le32_to_cpu(rxdes->rxdes0) & FTGMAC100_RXDES0_VDBC;
-}
-
-static bool ftgmac100_rxdes_multicast(struct ftgmac100_rxdes *rxdes)
-{
- return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_MULTICAST);
-}
-
-static void ftgmac100_rxdes_set_end_of_ring(const struct ftgmac100 *priv,
- struct ftgmac100_rxdes *rxdes)
-{
- rxdes->rxdes0 |= cpu_to_le32(priv->rxdes0_edorr_mask);
-}
-
-static void ftgmac100_rxdes_set_dma_addr(struct ftgmac100_rxdes *rxdes,
- dma_addr_t addr)
-{
- rxdes->rxdes3 = cpu_to_le32(addr);
-}
-
-static dma_addr_t ftgmac100_rxdes_get_dma_addr(struct ftgmac100_rxdes *rxdes)
-{
- return le32_to_cpu(rxdes->rxdes3);
-}
-
-static inline bool ftgmac100_rxdes_csum_err(struct ftgmac100_rxdes *rxdes)
-{
- return !!(rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_TCP_CHKSUM_ERR |
- FTGMAC100_RXDES1_UDP_CHKSUM_ERR |
- FTGMAC100_RXDES1_IP_CHKSUM_ERR));
-}
-
static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry,
struct ftgmac100_rxdes *rxdes, gfp_t gfp)
{
priv->rx_skbs[entry] = skb;
/* Store DMA address into RX desc */
- ftgmac100_rxdes_set_dma_addr(rxdes, map);
+ rxdes->rxdes3 = cpu_to_le32(map);
/* Ensure the above is ordered vs clearing the OWN bit */
dma_wmb();
- /* Clean rxdes0 (which resets own bit) */
- rxdes->rxdes0 &= cpu_to_le32(priv->rxdes0_edorr_mask);
+ /* Clean status (which resets own bit) */
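+ /* The last descriptor must keep the end of ring (EDORR) marker set */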
+ if (entry == (RX_QUEUE_ENTRIES - 1))
+ rxdes->rxdes0 = cpu_to_le32(priv->rxdes0_edorr_mask);
+ else
+ rxdes->rxdes0 = 0;
return 0;
}
return (pointer + 1) & (RX_QUEUE_ENTRIES - 1);
}
-static void ftgmac100_rx_packet_error(struct ftgmac100 *priv,
- struct ftgmac100_rxdes *rxdes)
+static void ftgmac100_rx_packet_error(struct ftgmac100 *priv, u32 status)
{
struct net_device *netdev = priv->netdev;
- if (ftgmac100_rxdes_rx_error(rxdes))
+ if (status & FTGMAC100_RXDES0_RX_ERR)
netdev->stats.rx_errors++;
- if (ftgmac100_rxdes_crc_error(rxdes))
+ if (status & FTGMAC100_RXDES0_CRC_ERR)
netdev->stats.rx_crc_errors++;
- if (ftgmac100_rxdes_frame_too_long(rxdes) ||
- ftgmac100_rxdes_runt(rxdes) ||
- ftgmac100_rxdes_odd_nibble(rxdes))
+ if (status & (FTGMAC100_RXDES0_FTL |
+ FTGMAC100_RXDES0_RUNT |
+ FTGMAC100_RXDES0_RX_ODD_NB))
netdev->stats.rx_length_errors++;
}
struct ftgmac100_rxdes *rxdes;
struct sk_buff *skb;
unsigned int pointer, size;
+ u32 status;
dma_addr_t map;
/* Grab next RX descriptor */
pointer = priv->rx_pointer;
rxdes = &priv->descs->rxdes[pointer];
+ /* Grab descriptor status */
+ status = le32_to_cpu(rxdes->rxdes0);
+
/* Do we have a packet ? */
- if (!ftgmac100_rxdes_packet_ready(rxdes))
+ if (!(status & FTGMAC100_RXDES0_RXPKT_RDY))
return false;
/* Order subsequent reads with the test for the ready bit */
dma_rmb();
/* We don't cope with fragmented RX packets */
- if (unlikely(!ftgmac100_rxdes_first_segment(rxdes) ||
- !ftgmac100_rxdes_last_segment(rxdes)))
+ if (unlikely(!(status & FTGMAC100_RXDES0_FRS) ||
+ !(status & FTGMAC100_RXDES0_LRS)))
goto drop;
/* Any error (other than csum offload) flagged ? */
- if (unlikely(ftgmac100_rxdes_any_error(rxdes))) {
- ftgmac100_rx_packet_error(priv, rxdes);
+ if (unlikely(status & RXDES0_ANY_ERROR)) {
+ ftgmac100_rx_packet_error(priv, status);
goto drop;
}
goto drop;
}
- if (unlikely(ftgmac100_rxdes_multicast(rxdes)))
+ if (unlikely(status & FTGMAC100_RXDES0_MULTICAST))
netdev->stats.multicast++;
/* If the HW found checksum errors, bounce it to software.
}
/* Grab received size and transfer to skb */
- size = ftgmac100_rxdes_data_length(rxdes);
+ size = status & FTGMAC100_RXDES0_VDBC;
skb_put(skb, size);
/* Tear down DMA mapping, do necessary cache management */
- map = ftgmac100_rxdes_get_dma_addr(rxdes);
+ map = le32_to_cpu(rxdes->rxdes3);
+
#if defined(CONFIG_ARM) && !defined(CONFIG_ARM_DMA_USE_IOMMU)
/* When we don't have an iommu, we can save cycles by not
* invalidating the cache for the part of the packet that
drop:
/* Clean rxdes0 (which resets own bit) */
- rxdes->rxdes0 &= cpu_to_le32(priv->rxdes0_edorr_mask);
+ rxdes->rxdes0 = cpu_to_le32(status & priv->rxdes0_edorr_mask);
priv->rx_pointer = ftgmac100_next_rx_pointer(pointer);
netdev->stats.rx_dropped++;
return true;
for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[i];
struct sk_buff *skb = priv->rx_skbs[i];
- dma_addr_t map = ftgmac100_rxdes_get_dma_addr(rxdes);
+ dma_addr_t map = le32_to_cpu(rxdes->rxdes3);
if (!skb)
continue;
static void ftgmac100_init_rings(struct ftgmac100 *priv)
{
+ struct ftgmac100_rxdes *rxdes;
int i;
/* Initialize RX ring */
for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
- struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[i];
- ftgmac100_rxdes_set_dma_addr(rxdes, priv->rx_scratch_dma);
+ rxdes = &priv->descs->rxdes[i];
rxdes->rxdes0 = 0;
+ rxdes->rxdes3 = cpu_to_le32(priv->rx_scratch_dma);
}
- ftgmac100_rxdes_set_end_of_ring(priv, &priv->descs->rxdes[i - 1]);
+ /* Mark the end of the ring */
+ rxdes->rxdes0 |= cpu_to_le32(priv->rxdes0_edorr_mask);
/* Initialize TX ring */
for (i = 0; i < TX_QUEUE_ENTRIES; i++)
return IRQ_HANDLED;
}
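+/* Peek at the next RX descriptor: only the packet-ready bit is tested
+ * here, the RX path re-reads the descriptor with the proper barriers
+ * before using it.
+ */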
+static bool ftgmac100_check_rx(struct ftgmac100 *priv)
+{
+ struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[priv->rx_pointer];
+
+ /* Do we have a packet ? */
+ return !!(rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RXPKT_RDY));
+}
+
static int ftgmac100_poll(struct napi_struct *napi, int budget)
{
struct ftgmac100 *priv = container_of(napi, struct ftgmac100, napi);
*/
iowrite32(FTGMAC100_INT_RXTX,
priv->base + FTGMAC100_OFFSET_ISR);
- if (ftgmac100_rxdes_packet_ready
- (ftgmac100_current_rxdes(priv)) || priv->tx_pending)
+ if (ftgmac100_check_rx(priv) || priv->tx_pending)
return budget;
/* deschedule NAPI */