#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
-static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
- struct napi_struct *napi);
+static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb);
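/* Note: the NAPI context is dropped from the prototype; the GRO hand-off
 * moves out of gfar_process_frame() and into its caller,
 * gfar_clean_rx_ring() (see below).
 */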
static void gfar_halt_nodisable(struct gfar_private *priv);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
rx_queue = priv->rx_queue[i];
rx_queue->rx_bd_base = vaddr;
rx_queue->rx_bd_dma_base = addr;
- rx_queue->dev = ndev;
+ rx_queue->ndev = ndev;
addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
}
priv->rx_queue[i]->rx_skbuff = NULL;
priv->rx_queue[i]->qindex = i;
- priv->rx_queue[i]->dev = priv->ndev;
+ priv->rx_queue[i]->ndev = priv->ndev;
}
return 0;
}
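/* The dev -> ndev renames above assume a matching field rename in
 * struct gfar_priv_rx_q (gianfar.h), not shown in this excerpt; a minimal
 * sketch of the assumed header change:
 *
 *	struct gfar_priv_rx_q {
 *		...
 *		struct net_device *ndev;	// was: struct net_device *dev
 *		...
 *	};
 */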
static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
struct rxbd8 *rxbdp;
- struct gfar_private *priv = netdev_priv(rx_queue->dev);
+ struct gfar_private *priv = netdev_priv(rx_queue->ndev);
int i;
rxbdp = rx_queue->rx_bd_base;
static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
{
- struct gfar_private *priv = netdev_priv(rx_queue->dev);
+ struct gfar_private *priv = netdev_priv(rx_queue->ndev);
struct gfar_extra_stats *estats = &priv->extra_stats;
- netdev_err(rx_queue->dev, "Can't alloc RX buffers\n");
+ netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
atomic64_inc(&estats->rx_alloc_err);
}
static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
int alloc_cnt)
{
- struct net_device *ndev = rx_queue->dev;
+ struct net_device *ndev = rx_queue->ndev;
struct rxbd8 *bdp, *base;
dma_addr_t bufaddr;
int i;
rx_queue->next_to_use = i;
}
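/* The refill loop is elided in this hunk. A hedged sketch of what it is
 * assumed to do, using existing driver helpers (gfar_new_skb(),
 * gfar_init_rxbdp(), next_bd(); names assumed, not shown in this excerpt):
 *
 *	i = rx_queue->next_to_use;
 *	bdp = &rx_queue->rx_bd_base[i];
 *	while (alloc_cnt--) {
 *		skb = gfar_new_skb(ndev, &bufaddr);	// alloc + DMA-map buffer
 *		if (unlikely(!skb)) {
 *			gfar_rx_alloc_err(rx_queue);	// count the failure
 *			break;
 *		}
 *		rx_queue->rx_skbuff[i] = skb;
 *		gfar_init_rxbdp(rx_queue, bdp, bufaddr);	// arm the BD
 *		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
 *		if (unlikely(++i == rx_queue->rx_ring_size))
 *			i = 0;
 *	}
 */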
-static void count_errors(u32 lstatus, struct net_device *dev)
+static void count_errors(u32 lstatus, struct net_device *ndev)
{
- struct gfar_private *priv = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
+ struct gfar_private *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
struct gfar_extra_stats *estats = &priv->extra_stats;
/* If the packet was truncated, none of the other errors matter */
}
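/* Body elided above. A hedged sketch of the truncation short-circuit the
 * comment refers to (BD_LFLAG() and RXBD_TRUNCATED are existing gianfar.h
 * definitions; the stats fields match the declarations above):
 *
 *	if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
 *		stats->rx_length_errors++;
 *		atomic64_inc(&estats->rx_trunc);
 *		return;
 *	}
 */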
/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
-static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
- struct napi_struct *napi)
+static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
{
- struct gfar_private *priv = netdev_priv(dev);
+ struct gfar_private *priv = netdev_priv(ndev);
struct rxfcb *fcb = NULL;
/* fcb is at the beginning if it exists */
/* Remove the FCB from the skb
* Remove the padded bytes, if there are any
*/
- if (priv->uses_rxfcb) {
- skb_record_rx_queue(skb, fcb->rq);
+ if (priv->uses_rxfcb)
skb_pull(skb, GMAC_FCB_LEN);
- }
/* Get receive timestamp from the skb */
if (priv->hwts_rx_en) {
if (priv->padding)
skb_pull(skb, priv->padding);
- if (dev->features & NETIF_F_RXCSUM)
+ if (ndev->features & NETIF_F_RXCSUM)
gfar_rx_checksum(skb, fcb);
/* Tell the skb what kind of packet this is */
- skb->protocol = eth_type_trans(skb, dev);
+ skb->protocol = eth_type_trans(skb, ndev);
/* We need to check for NETIF_F_HW_VLAN_CTAG_RX here.
 * Even if vlan rx accel is disabled, on some chips
 * RXFCB_VLN is pseudo-randomly set.
*/
- if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
+ if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
be16_to_cpu(fcb->flags) & RXFCB_VLN)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
be16_to_cpu(fcb->vlctl));
-
- /* Send the packet up the stack */
- napi_gro_receive(napi, skb);
-
}
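/* With the GRO call removed, gfar_process_frame() is now a pure skb fixup
 * step: the caller records the rx queue and submits the skb to the stack.
 * Note the recorded queue becomes the software index (rx_queue->qindex,
 * see gfar_clean_rx_ring below) instead of the hardware-reported fcb->rq.
 */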
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget (rx_work_limit) is exhausted. Returns the
 * number of frames handled.
 */
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
- struct net_device *dev = rx_queue->dev;
+ struct net_device *ndev = rx_queue->ndev;
struct rxbd8 *bdp, *base;
struct sk_buff *skb;
int i, howmany = 0;
int cleaned_cnt = gfar_rxbd_unused(rx_queue);
- struct gfar_private *priv = netdev_priv(dev);
+ struct gfar_private *priv = netdev_priv(ndev);
/* Get the first full descriptor */
base = rx_queue->rx_bd_base;
if (unlikely(!(lstatus & BD_LFLAG(RXBD_LAST)) ||
(lstatus & BD_LFLAG(RXBD_ERR)))) {
- count_errors(lstatus, dev);
+ count_errors(lstatus, ndev);
/* discard faulty buffer */
dev_kfree_skb(skb);
skb_put(skb, pkt_len);
rx_queue->stats.rx_bytes += pkt_len;
skb_record_rx_queue(skb, rx_queue->qindex);
- gfar_process_frame(dev, skb,
- &rx_queue->grp->napi_rx);
+ gfar_process_frame(ndev, skb);
+
+ /* Send the packet up the stack */
+ napi_gro_receive(&rx_queue->grp->napi_rx, skb);
} else {
- netif_warn(priv, rx_err, dev, "Missing skb!\n");
+ netif_warn(priv, rx_err, ndev, "Missing skb!\n");
rx_queue->stats.rx_dropped++;
atomic64_inc(&priv->extra_stats.rx_skbmissing);
}
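/* Not shown in this hunk: cleaned_cnt (from gfar_rxbd_unused() above) is
 * assumed to drive bulk ring refills via gfar_alloc_rx_buffs() once enough
 * descriptors have been consumed; a hedged reading of the bookkeeping,
 * not confirmed by this excerpt.
 */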