return -ENOMEM;
priv = netdev_priv(dev);
- priv->dev = dev;
+ priv->ndev = dev;
+ priv->ofdev = ofdev;
priv->node = ofdev->node;
+ SET_NETDEV_DEV(dev, &ofdev->dev);
err = gfar_of_init(dev);
dev_set_drvdata(&ofdev->dev, NULL);
iounmap(priv->regs);
- free_netdev(priv->dev);
+ free_netdev(priv->ndev);
return 0;
}
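The probe-side hunks above reduce to one pattern: keep both the net_device and the of_platform device in the driver's private struct, and parent the interface to the bus device. A minimal sketch of that pattern, assuming a simplified private struct and a single-argument probe; example_priv and example_probe are illustrative names, not the driver's own:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/of_device.h>

struct example_priv {
	struct net_device *ndev;	/* network interface we register */
	struct of_device *ofdev;	/* bus device that probed us */
};

static int example_probe(struct of_device *ofdev)
{
	struct net_device *dev;
	struct example_priv *priv;
	int err;

	dev = alloc_etherdev(sizeof(*priv));
	if (!dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->ndev = dev;	/* the two pointers are not interchangeable */
	priv->ofdev = ofdev;

	/* Parent the interface to the bus device so the driver core
	 * and sysfs see the right topology. */
	SET_NETDEV_DEV(dev, &ofdev->dev);

	err = register_netdev(dev);
	if (err)
		free_netdev(priv->ndev);
	return err;
}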
free_skb_resources(priv);
- dma_free_coherent(&dev->dev,
+ dma_free_coherent(&priv->ofdev->dev,
sizeof(struct txbd8)*priv->tx_ring_size
+ sizeof(struct rxbd8)*priv->rx_ring_size,
priv->tx_bd_base,
if (!priv->tx_skbuff[i])
continue;
- dma_unmap_single(&priv->dev->dev, txbdp->bufPtr,
+ dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
txbdp->length, DMA_TO_DEVICE);
txbdp->lstatus = 0;
for (j = 0; j < skb_shinfo(priv->tx_skbuff[i])->nr_frags; j++) {
txbdp++;
- dma_unmap_page(&priv->dev->dev, txbdp->bufPtr,
+ dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
txbdp->length, DMA_TO_DEVICE);
}
txbdp++;
if (priv->rx_skbuff != NULL) {
for (i = 0; i < priv->rx_ring_size; i++) {
if (priv->rx_skbuff[i]) {
- dma_unmap_single(&priv->dev->dev, rxbdp->bufPtr,
+ dma_unmap_single(&priv->ofdev->dev, rxbdp->bufPtr,
priv->rx_buffer_size,
DMA_FROM_DEVICE);
gfar_write(&regs->imask, IMASK_INIT_CLEAR);
/* Allocate memory for the buffer descriptors */
- vaddr = (unsigned long) dma_alloc_coherent(&dev->dev,
+ vaddr = (unsigned long) dma_alloc_coherent(&priv->ofdev->dev,
sizeof (struct txbd8) * priv->tx_ring_size +
sizeof (struct rxbd8) * priv->rx_ring_size,
&addr, GFP_KERNEL);
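The descriptor rings follow the same rule: coherent DMA memory is allocated once for the TX ring followed by the RX ring, and must later be freed with the identical device, total size, CPU address, and DMA handle. A hedged sketch of that allocation, with illustrative names:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Illustrative ring setup: one coherent block holding the TX ring
 * followed by the RX ring, allocated against the bus device.  The
 * matching dma_free_coherent() must be given the same device,
 * size, CPU address, and DMA handle. */
static void *example_alloc_rings(struct device *dev, size_t tx_bytes,
				 size_t rx_bytes, dma_addr_t *dma)
{
	return dma_alloc_coherent(dev, tx_bytes + rx_bytes, dma, GFP_KERNEL);
}

In the patch, that device is &priv->ofdev->dev in both the allocation and the two free paths above.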
rx_skb_fail:
free_skb_resources(priv);
tx_skb_fail:
- dma_free_coherent(&dev->dev,
+ dma_free_coherent(&priv->ofdev->dev,
sizeof(struct txbd8)*priv->tx_ring_size
+ sizeof(struct rxbd8)*priv->rx_ring_size,
priv->tx_bd_base,
if (i == nr_frags - 1)
lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
- bufaddr = dma_map_page(&dev->dev,
+ bufaddr = dma_map_page(&priv->ofdev->dev,
skb_shinfo(skb)->frags[i].page,
skb_shinfo(skb)->frags[i].page_offset,
length,
/* setup the TxBD length and buffer pointer for the first BD */
priv->tx_skbuff[priv->skb_curtx] = skb;
- txbdp_start->bufPtr = dma_map_single(&dev->dev, skb->data,
+ txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
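On the transmit path the same device is handed to the streaming-DMA calls: dma_map_single() for the skb's linear data, dma_map_page() for each paged fragment. A sketch under the same vintage of skb_frag_t (page/page_offset/size fields), again with stand-in names; a real driver would store every handle in its BD ring and check dma_mapping_error():

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Illustrative TX mapping: linear data via dma_map_single(), each
 * paged fragment via dma_map_page(), all against the bus device. */
static dma_addr_t example_map_tx(struct device *dev, struct sk_buff *skb)
{
	dma_addr_t head;
	int i;

	head = dma_map_single(dev, skb->data, skb_headlen(skb),
			      DMA_TO_DEVICE);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		/* Old-style frag fields, matching the patch's vintage. */
		dma_map_page(dev, frag->page, frag->page_offset,
			     frag->size, DMA_TO_DEVICE);
	}

	return head;
}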
{
struct gfar_private *priv = container_of(work, struct gfar_private,
reset_task);
- struct net_device *dev = priv->dev;
+ struct net_device *dev = priv->ndev;
if (dev->flags & IFF_UP) {
stop_gfar(dev);
(lstatus & BD_LENGTH_MASK))
break;
- dma_unmap_single(&dev->dev,
+ dma_unmap_single(&priv->ofdev->dev,
bdp->bufPtr,
bdp->length,
DMA_TO_DEVICE);
bdp = next_txbd(bdp, base, tx_ring_size);
for (i = 0; i < frags; i++) {
- dma_unmap_page(&dev->dev,
+ dma_unmap_page(&priv->ofdev->dev,
bdp->bufPtr,
bdp->length,
DMA_TO_DEVICE);
struct gfar_private *priv = netdev_priv(dev);
u32 lstatus;
- bdp->bufPtr = dma_map_single(&dev->dev, skb->data,
+ bdp->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
priv->rx_buffer_size, DMA_FROM_DEVICE);
lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
skb = priv->rx_skbuff[priv->skb_currx];
- dma_unmap_single(&priv->dev->dev, bdp->bufPtr,
+ dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
priv->rx_buffer_size, DMA_FROM_DEVICE);
/* We drop the frame if we failed to allocate a new buffer */
static int gfar_poll(struct napi_struct *napi, int budget)
{
struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
- struct net_device *dev = priv->dev;
+ struct net_device *dev = priv->ndev;
int tx_cleaned = 0;
int rx_cleaned = 0;
unsigned long flags;
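The receive path mirrors this: buffers are mapped and unmapped with DMA_FROM_DEVICE against the same bus device. A final sketch of the refill mapping, with an illustrative name:

#include <linux/dma-mapping.h>

/* Illustrative RX refill: map the fresh buffer for device writes.
 * The later dma_unmap_single() on completion must use the same
 * device, handle, and length. */
static dma_addr_t example_map_rx(struct device *dev, void *buf, size_t len)
{
	return dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
}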