1/*
2 * RDC R6040 Fast Ethernet MAC support
3 *
4 * Copyright (C) 2004 Sten Wang <sten.wang@rdc.com.tw>
5 * Copyright (C) 2007
6 * Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>
7 * Copyright (C) 2007-2012 Florian Fainelli <florian@openwrt.org>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2
12 * of the License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the
21 * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
22 * Boston, MA 02110-1301, USA.
23*/
24
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/moduleparam.h>
28#include <linux/string.h>
29#include <linux/timer.h>
30#include <linux/errno.h>
31#include <linux/ioport.h>
32#include <linux/interrupt.h>
33#include <linux/pci.h>
34#include <linux/netdevice.h>
35#include <linux/etherdevice.h>
36#include <linux/skbuff.h>
37#include <linux/init.h>
38#include <linux/delay.h>
39#include <linux/mii.h>
40#include <linux/ethtool.h>
41#include <linux/crc32.h>
42#include <linux/spinlock.h>
43#include <linux/bitops.h>
44#include <linux/io.h>
45#include <linux/irq.h>
46#include <linux/uaccess.h>
47#include <linux/phy.h>
48
49#include <asm/processor.h>
50
51#define DRV_NAME "r6040"
52#define DRV_VERSION "0.28"
53#define DRV_RELDATE "07Oct2011"
54
55/* Time in jiffies before concluding the transmitter is hung. */
56#define TX_TIMEOUT (6000 * HZ / 1000)
57
58/* RDC MAC I/O Size */
59#define R6040_IO_SIZE 256
60
61/* MAX RDC MAC */
62#define MAX_MAC 2
63
64/* MAC registers */
65#define MCR0 0x00 /* Control register 0 */
66#define MCR0_RCVEN 0x0002 /* Receive enable */
67#define MCR0_PROMISC 0x0020 /* Promiscuous mode */
68#define MCR0_HASH_EN 0x0100 /* Enable multicast hash table function */
69#define MCR0_XMTEN 0x1000 /* Transmission enable */
70#define MCR0_FD 0x8000 /* Full/Half duplex */
71#define MCR1 0x04 /* Control register 1 */
72#define MAC_RST 0x0001 /* Reset the MAC */
73#define MBCR 0x08 /* Bus control */
74#define MT_ICR 0x0C /* TX interrupt control */
75#define MR_ICR 0x10 /* RX interrupt control */
76#define MTPR 0x14 /* TX poll command register */
77#define TM2TX 0x0001 /* Trigger MAC to transmit */
78#define MR_BSR 0x18 /* RX buffer size */
79#define MR_DCR 0x1A /* RX descriptor control */
80#define MLSR 0x1C /* Last status */
81#define TX_FIFO_UNDR 0x0200 /* TX FIFO under-run */
82#define TX_EXCEEDC 0x2000 /* Transmit exceed collision */
83#define TX_LATEC 0x4000 /* Transmit late collision */
84#define MMDIO 0x20 /* MDIO control register */
85#define MDIO_WRITE 0x4000 /* MDIO write */
86#define MDIO_READ 0x2000 /* MDIO read */
87#define MMRD 0x24 /* MDIO read data register */
88#define MMWD 0x28 /* MDIO write data register */
89#define MTD_SA0 0x2C /* TX descriptor start address 0 */
90#define MTD_SA1 0x30 /* TX descriptor start address 1 */
91#define MRD_SA0 0x34 /* RX descriptor start address 0 */
92#define MRD_SA1 0x38 /* RX descriptor start address 1 */
93#define MISR 0x3C /* Status register */
94#define MIER 0x40 /* INT enable register */
95#define MSK_INT 0x0000 /* Mask off interrupts */
96#define RX_FINISH 0x0001 /* RX finished */
97#define RX_NO_DESC 0x0002 /* No RX descriptor available */
98#define RX_FIFO_FULL 0x0004 /* RX FIFO full */
99#define RX_EARLY 0x0008 /* RX early */
100#define TX_FINISH 0x0010 /* TX finished */
101#define TX_EARLY 0x0080 /* TX early */
102#define EVENT_OVRFL 0x0100 /* Event counter overflow */
103#define LINK_CHANGED 0x0200 /* PHY link changed */
104#define ME_CISR 0x44 /* Event counter INT status */
105#define ME_CIER 0x48 /* Event counter INT enable */
106#define MR_CNT 0x50 /* Successfully received packet counter */
107#define ME_CNT0 0x52 /* Event counter 0 */
108#define ME_CNT1 0x54 /* Event counter 1 */
109#define ME_CNT2 0x56 /* Event counter 2 */
110#define ME_CNT3 0x58 /* Event counter 3 */
111#define MT_CNT 0x5A /* Successfully transmit packet counter */
112#define ME_CNT4 0x5C /* Event counter 4 */
113#define MP_CNT 0x5E /* Pause frame counter register */
114#define MAR0 0x60 /* Hash table 0 */
115#define MAR1 0x62 /* Hash table 1 */
116#define MAR2 0x64 /* Hash table 2 */
117#define MAR3 0x66 /* Hash table 3 */
118#define MID_0L 0x68 /* Multicast address MID0 Low */
119#define MID_0M 0x6A /* Multicast address MID0 Medium */
120#define MID_0H 0x6C /* Multicast address MID0 High */
121#define MID_1L 0x70 /* MID1 Low */
122#define MID_1M 0x72 /* MID1 Medium */
123#define MID_1H 0x74 /* MID1 High */
124#define MID_2L 0x78 /* MID2 Low */
125#define MID_2M 0x7A /* MID2 Medium */
126#define MID_2H 0x7C /* MID2 High */
127#define MID_3L 0x80 /* MID3 Low */
128#define MID_3M 0x82 /* MID3 Medium */
129#define MID_3H 0x84 /* MID3 High */
130#define PHY_CC 0x88 /* PHY status change configuration register */
131#define SCEN 0x8000 /* PHY status change enable */
132#define PHYAD_SHIFT 8 /* PHY address shift */
133#define TMRDIV_SHIFT 0 /* Timer divider shift */
134#define PHY_ST 0x8A /* PHY status register */
135#define MAC_SM 0xAC /* MAC status machine */
136#define MAC_SM_RST 0x0002 /* MAC status machine reset */
137#define MAC_ID 0xBE /* Identifier register */
138
139#define TX_DCNT 0x80 /* TX descriptor count */
140#define RX_DCNT 0x80 /* RX descriptor count */
141#define MAX_BUF_SIZE 0x600
142#define RX_DESC_SIZE (RX_DCNT * sizeof(struct r6040_descriptor))
143#define TX_DESC_SIZE (TX_DCNT * sizeof(struct r6040_descriptor))
144#define MBCR_DEFAULT 0x012A /* MAC Bus Control Register */
145#define MCAST_MAX 3 /* Max number multicast addresses to filter */
146
147#define MAC_DEF_TIMEOUT 2048 /* Default MAC read/write operation timeout */
148
149/* Descriptor status */
150#define DSC_OWNER_MAC 0x8000 /* MAC is the owner of this descriptor */
151#define DSC_RX_OK 0x4000 /* RX was successful */
152#define DSC_RX_ERR 0x0800 /* RX PHY error */
153#define DSC_RX_ERR_DRI 0x0400 /* RX dribble packet */
154#define DSC_RX_ERR_BUF 0x0200 /* RX length exceeds buffer size */
155#define DSC_RX_ERR_LONG 0x0100 /* RX length > maximum packet length */
156#define DSC_RX_ERR_RUNT 0x0080 /* RX packet length < 64 byte */
157#define DSC_RX_ERR_CRC 0x0040 /* RX CRC error */
158#define DSC_RX_BCAST 0x0020 /* RX broadcast (no error) */
159#define DSC_RX_MCAST 0x0010 /* RX multicast (no error) */
160#define DSC_RX_MCH_HIT 0x0008 /* RX multicast hit in hash table (no error) */
161#define DSC_RX_MIDH_HIT 0x0004 /* RX MID table hit (no error) */
162#define DSC_RX_IDX_MID_MASK 3 /* RX mask for the index of matched MIDx */
163
164MODULE_AUTHOR("Sten Wang <sten.wang@rdc.com.tw>, "
165 "Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>, "
166 "Florian Fainelli <florian@openwrt.org>");
167MODULE_LICENSE("GPL");
168MODULE_DESCRIPTION("RDC R6040 NAPI PCI FastEthernet driver");
169MODULE_VERSION(DRV_VERSION " " DRV_RELDATE);
170
171/* RX and TX interrupts that we handle */
172#define RX_INTS (RX_FIFO_FULL | RX_NO_DESC | RX_FINISH)
173#define TX_INTS (TX_FINISH)
174#define INT_MASK (RX_INTS | TX_INTS)
175
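/* Note: the first four fields (status, len, buf, ndesc) form the descriptor
 * as consumed by the MAC's DMA engine; vbufp, vndescp and skb_ptr are
 * driver-side bookkeeping kept in the same 32-byte-aligned slot. */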
176struct r6040_descriptor {
177 u16 status, len; /* 0-3 */
178 __le32 buf; /* 4-7 */
179 __le32 ndesc; /* 8-B */
180 u32 rev1; /* C-F */
181 char *vbufp; /* 10-13 */
182 struct r6040_descriptor *vndescp; /* 14-17 */
183 struct sk_buff *skb_ptr; /* 18-1B */
184 u32 rev2; /* 1C-1F */
185} __aligned(32);
186
187struct r6040_private {
188 spinlock_t lock; /* driver lock */
189 struct pci_dev *pdev;
190 struct r6040_descriptor *rx_insert_ptr;
191 struct r6040_descriptor *rx_remove_ptr;
192 struct r6040_descriptor *tx_insert_ptr;
193 struct r6040_descriptor *tx_remove_ptr;
194 struct r6040_descriptor *rx_ring;
195 struct r6040_descriptor *tx_ring;
196 dma_addr_t rx_ring_dma;
197 dma_addr_t tx_ring_dma;
198 u16 tx_free_desc;
199 u16 mcr0;
200 struct net_device *dev;
201 struct mii_bus *mii_bus;
202 struct napi_struct napi;
203 void __iomem *base;
204 struct phy_device *phydev;
205 int old_link;
206 int old_duplex;
207};
208
209static char version[] __devinitdata = DRV_NAME
210 ": RDC R6040 NAPI net driver, "
211 "version "DRV_VERSION " (" DRV_RELDATE ")";
212
213/* Read a word of data from the PHY chip */
214static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
215{
216 int limit = MAC_DEF_TIMEOUT;
217 u16 cmd;
218
219 iowrite16(MDIO_READ + reg + (phy_addr << 8), ioaddr + MMDIO);
220 /* Wait for the read bit to be cleared */
221 while (limit--) {
222 cmd = ioread16(ioaddr + MMDIO);
223 if (!(cmd & MDIO_READ))
224 break;
225 }
226
227 return ioread16(ioaddr + MMRD);
228}
229
230/* Write a word of data to the PHY chip */
231static void r6040_phy_write(void __iomem *ioaddr,
232 int phy_addr, int reg, u16 val)
233{
234 int limit = MAC_DEF_TIMEOUT;
235 u16 cmd;
236
237 iowrite16(val, ioaddr + MMWD);
238 /* Write the command to the MDIO bus */
239 iowrite16(MDIO_WRITE + reg + (phy_addr << 8), ioaddr + MMDIO);
240 /* Wait for the write bit to be cleared */
241 while (limit--) {
242 cmd = ioread16(ioaddr + MMDIO);
243 if (!(cmd & MDIO_WRITE))
244 break;
245 }
246}
247
248static int r6040_mdiobus_read(struct mii_bus *bus, int phy_addr, int reg)
249{
250 struct net_device *dev = bus->priv;
251 struct r6040_private *lp = netdev_priv(dev);
252 void __iomem *ioaddr = lp->base;
253
254 return r6040_phy_read(ioaddr, phy_addr, reg);
255}
256
257static int r6040_mdiobus_write(struct mii_bus *bus, int phy_addr,
258 int reg, u16 value)
259{
260 struct net_device *dev = bus->priv;
261 struct r6040_private *lp = netdev_priv(dev);
262 void __iomem *ioaddr = lp->base;
263
264 r6040_phy_write(ioaddr, phy_addr, reg, value);
265
266 return 0;
267}
268
269static int r6040_mdiobus_reset(struct mii_bus *bus)
270{
271 return 0;
272}
273
274static void r6040_free_txbufs(struct net_device *dev)
275{
276 struct r6040_private *lp = netdev_priv(dev);
277 int i;
278
279 for (i = 0; i < TX_DCNT; i++) {
280 if (lp->tx_insert_ptr->skb_ptr) {
281 pci_unmap_single(lp->pdev,
282 le32_to_cpu(lp->tx_insert_ptr->buf),
283 MAX_BUF_SIZE, PCI_DMA_TODEVICE);
284 dev_kfree_skb(lp->tx_insert_ptr->skb_ptr);
285 lp->tx_insert_ptr->skb_ptr = NULL;
286 }
287 lp->tx_insert_ptr = lp->tx_insert_ptr->vndescp;
288 }
289}
290
291static void r6040_free_rxbufs(struct net_device *dev)
292{
293 struct r6040_private *lp = netdev_priv(dev);
294 int i;
295
296 for (i = 0; i < RX_DCNT; i++) {
297 if (lp->rx_insert_ptr->skb_ptr) {
298 pci_unmap_single(lp->pdev,
299 le32_to_cpu(lp->rx_insert_ptr->buf),
300 MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
301 dev_kfree_skb(lp->rx_insert_ptr->skb_ptr);
302 lp->rx_insert_ptr->skb_ptr = NULL;
303 }
304 lp->rx_insert_ptr = lp->rx_insert_ptr->vndescp;
305 }
306}
307
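/* Link the descriptors into a ring: ndesc carries the DMA address of the
 * next descriptor and vndescp its virtual address, with the last entry
 * wrapping back to the first. */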
308static void r6040_init_ring_desc(struct r6040_descriptor *desc_ring,
309 dma_addr_t desc_dma, int size)
310{
311 struct r6040_descriptor *desc = desc_ring;
312 dma_addr_t mapping = desc_dma;
313
314 while (size-- > 0) {
315 mapping += sizeof(*desc);
316 desc->ndesc = cpu_to_le32(mapping);
317 desc->vndescp = desc + 1;
318 desc++;
319 }
320 desc--;
321 desc->ndesc = cpu_to_le32(desc_dma);
322 desc->vndescp = desc_ring;
323}
324
325static void r6040_init_txbufs(struct net_device *dev)
326{
327 struct r6040_private *lp = netdev_priv(dev);
328
329 lp->tx_free_desc = TX_DCNT;
330
331 lp->tx_remove_ptr = lp->tx_insert_ptr = lp->tx_ring;
332 r6040_init_ring_desc(lp->tx_ring, lp->tx_ring_dma, TX_DCNT);
333}
334
335static int r6040_alloc_rxbufs(struct net_device *dev)
336{
337 struct r6040_private *lp = netdev_priv(dev);
338 struct r6040_descriptor *desc;
339 struct sk_buff *skb;
340 int rc;
341
342 lp->rx_remove_ptr = lp->rx_insert_ptr = lp->rx_ring;
343 r6040_init_ring_desc(lp->rx_ring, lp->rx_ring_dma, RX_DCNT);
344
345 /* Allocate skbs for the rx descriptors */
346 desc = lp->rx_ring;
347 do {
348 skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
349 if (!skb) {
350 netdev_err(dev, "failed to alloc skb for rx\n");
351 rc = -ENOMEM;
352 goto err_exit;
353 }
354 desc->skb_ptr = skb;
355 desc->buf = cpu_to_le32(pci_map_single(lp->pdev,
356 desc->skb_ptr->data,
357 MAX_BUF_SIZE, PCI_DMA_FROMDEVICE));
358 desc->status = DSC_OWNER_MAC;
359 desc = desc->vndescp;
360 } while (desc != lp->rx_ring);
361
362 return 0;
363
364err_exit:
365 /* Deallocate all previously allocated skbs */
366 r6040_free_rxbufs(dev);
367 return rc;
368}
369
370static void r6040_reset_mac(struct r6040_private *lp)
371{
372 void __iomem *ioaddr = lp->base;
373 int limit = MAC_DEF_TIMEOUT;
374 u16 cmd;
375
376 iowrite16(MAC_RST, ioaddr + MCR1);
377 while (limit--) {
378 cmd = ioread16(ioaddr + MCR1);
379 if (cmd & MAC_RST)
380 break;
381 }
382
383 /* Reset internal state machine */
384 iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
385 iowrite16(0, ioaddr + MAC_SM);
386 mdelay(5);
387}
388
389static void r6040_init_mac_regs(struct net_device *dev)
390{
391 struct r6040_private *lp = netdev_priv(dev);
392 void __iomem *ioaddr = lp->base;
393
394 /* Mask Off Interrupt */
395 iowrite16(MSK_INT, ioaddr + MIER);
396
397 /* Reset RDC MAC */
398 r6040_reset_mac(lp);
399
400 /* MAC Bus Control Register */
401 iowrite16(MBCR_DEFAULT, ioaddr + MBCR);
402
403 /* Buffer Size Register */
404 iowrite16(MAX_BUF_SIZE, ioaddr + MR_BSR);
405
406 /* Write TX ring start address */
407 iowrite16(lp->tx_ring_dma, ioaddr + MTD_SA0);
408 iowrite16(lp->tx_ring_dma >> 16, ioaddr + MTD_SA1);
409
410 /* Write RX ring start address */
411 iowrite16(lp->rx_ring_dma, ioaddr + MRD_SA0);
412 iowrite16(lp->rx_ring_dma >> 16, ioaddr + MRD_SA1);
413
414 /* Set interrupt waiting time and packet numbers */
415 iowrite16(0, ioaddr + MT_ICR);
416 iowrite16(0, ioaddr + MR_ICR);
417
418 /* Enable interrupts */
419 iowrite16(INT_MASK, ioaddr + MIER);
420
421 /* Enable TX and RX */
422 iowrite16(lp->mcr0 | MCR0_RCVEN, ioaddr);
423
424 /* Let TX poll the descriptors
425 * we may have been called by r6040_tx_timeout which has left
426 * some unsent tx buffers */
427 iowrite16(TM2TX, ioaddr + MTPR);
428}
429
430static void r6040_tx_timeout(struct net_device *dev)
431{
432 struct r6040_private *priv = netdev_priv(dev);
433 void __iomem *ioaddr = priv->base;
434
435 netdev_warn(dev, "transmit timed out, int enable %4.4x "
436 "status %4.4x\n",
437 ioread16(ioaddr + MIER),
438 ioread16(ioaddr + MISR));
439
440 dev->stats.tx_errors++;
441
442 /* Reset MAC and re-init all registers */
443 r6040_init_mac_regs(dev);
444}
445
446static struct net_device_stats *r6040_get_stats(struct net_device *dev)
447{
448 struct r6040_private *priv = netdev_priv(dev);
449 void __iomem *ioaddr = priv->base;
450 unsigned long flags;
451
452 spin_lock_irqsave(&priv->lock, flags);
453 dev->stats.rx_crc_errors += ioread8(ioaddr + ME_CNT1);
454 dev->stats.multicast += ioread8(ioaddr + ME_CNT0);
455 spin_unlock_irqrestore(&priv->lock, flags);
456
457 return &dev->stats;
458}
459
460/* Stop RDC MAC and Free the allocated resource */
461static void r6040_down(struct net_device *dev)
462{
463 struct r6040_private *lp = netdev_priv(dev);
464 void __iomem *ioaddr = lp->base;
465 u16 *adrp;
466
467 /* Stop MAC */
468 iowrite16(MSK_INT, ioaddr + MIER); /* Mask Off Interrupt */
469
470 /* Reset RDC MAC */
471 r6040_reset_mac(lp);
472
473 /* Restore MAC Address to MIDx */
474 adrp = (u16 *) dev->dev_addr;
475 iowrite16(adrp[0], ioaddr + MID_0L);
476 iowrite16(adrp[1], ioaddr + MID_0M);
477 iowrite16(adrp[2], ioaddr + MID_0H);
478
479 phy_stop(lp->phydev);
480}
481
482static int r6040_close(struct net_device *dev)
483{
484 struct r6040_private *lp = netdev_priv(dev);
485 struct pci_dev *pdev = lp->pdev;
486
487 spin_lock_irq(&lp->lock);
488 napi_disable(&lp->napi);
489 netif_stop_queue(dev);
490 r6040_down(dev);
491
492 free_irq(dev->irq, dev);
493
494 /* Free RX buffer */
495 r6040_free_rxbufs(dev);
496
497 /* Free TX buffer */
498 r6040_free_txbufs(dev);
499
500 spin_unlock_irq(&lp->lock);
501
502 /* Free Descriptor memory */
503 if (lp->rx_ring) {
504 pci_free_consistent(pdev,
505 RX_DESC_SIZE, lp->rx_ring, lp->rx_ring_dma);
506 lp->rx_ring = NULL;
507 }
508
509 if (lp->tx_ring) {
510 pci_free_consistent(pdev,
511 TX_DESC_SIZE, lp->tx_ring, lp->tx_ring_dma);
512 lp->tx_ring = NULL;
513 }
514
515 return 0;
516}
517
518static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
519{
520 struct r6040_private *lp = netdev_priv(dev);
521
522 if (!lp->phydev)
523 return -EINVAL;
524
525 return phy_mii_ioctl(lp->phydev, rq, cmd);
526}
527
528static int r6040_rx(struct net_device *dev, int limit)
529{
530 struct r6040_private *priv = netdev_priv(dev);
531 struct r6040_descriptor *descptr = priv->rx_remove_ptr;
532 struct sk_buff *skb_ptr, *new_skb;
533 int count = 0;
534 u16 err;
535
536 /* Limit not reached and the descriptor belongs to the CPU */
537 while (count < limit && !(descptr->status & DSC_OWNER_MAC)) {
538 /* Read the descriptor status */
539 err = descptr->status;
540 /* Global error status set */
541 if (err & DSC_RX_ERR) {
542 /* RX dribble */
543 if (err & DSC_RX_ERR_DRI)
544 dev->stats.rx_frame_errors++;
545 /* Buffer length exceeded */
546 if (err & DSC_RX_ERR_BUF)
547 dev->stats.rx_length_errors++;
548 /* Packet too long */
549 if (err & DSC_RX_ERR_LONG)
550 dev->stats.rx_length_errors++;
551 /* Packet < 64 bytes */
552 if (err & DSC_RX_ERR_RUNT)
553 dev->stats.rx_length_errors++;
554 /* CRC error */
555 if (err & DSC_RX_ERR_CRC) {
556 spin_lock(&priv->lock);
557 dev->stats.rx_crc_errors++;
558 spin_unlock(&priv->lock);
559 }
560 goto next_descr;
561 }
562
563 /* Packet successfully received */
564 new_skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
565 if (!new_skb) {
566 dev->stats.rx_dropped++;
567 goto next_descr;
568 }
569 skb_ptr = descptr->skb_ptr;
570 skb_ptr->dev = priv->dev;
571
572 /* Do not count the CRC */
573 skb_put(skb_ptr, descptr->len - 4);
574 pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
575 MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
576 skb_ptr->protocol = eth_type_trans(skb_ptr, priv->dev);
577
578 /* Send to upper layer */
579 netif_receive_skb(skb_ptr);
580 dev->stats.rx_packets++;
581 dev->stats.rx_bytes += descptr->len - 4;
582
583 /* put new skb into descriptor */
584 descptr->skb_ptr = new_skb;
585 descptr->buf = cpu_to_le32(pci_map_single(priv->pdev,
586 descptr->skb_ptr->data,
587 MAX_BUF_SIZE, PCI_DMA_FROMDEVICE));
588
589next_descr:
590 /* put the descriptor back to the MAC */
591 descptr->status = DSC_OWNER_MAC;
592 descptr = descptr->vndescp;
593 count++;
594 }
595 priv->rx_remove_ptr = descptr;
596
597 return count;
598}
599
600static void r6040_tx(struct net_device *dev)
601{
602 struct r6040_private *priv = netdev_priv(dev);
603 struct r6040_descriptor *descptr;
604 void __iomem *ioaddr = priv->base;
605 struct sk_buff *skb_ptr;
606 u16 err;
607
608 spin_lock(&priv->lock);
609 descptr = priv->tx_remove_ptr;
610 while (priv->tx_free_desc < TX_DCNT) {
611 /* Check for errors */
612 err = ioread16(ioaddr + MLSR);
613
614 if (err & TX_FIFO_UNDR)
615 dev->stats.tx_fifo_errors++;
616 if (err & (TX_EXCEEDC | TX_LATEC))
617 dev->stats.tx_carrier_errors++;
618
619 if (descptr->status & DSC_OWNER_MAC)
620 break; /* Not complete */
621 skb_ptr = descptr->skb_ptr;
622 pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
623 skb_ptr->len, PCI_DMA_TODEVICE);
624 /* Free buffer */
625 dev_kfree_skb_irq(skb_ptr);
626 descptr->skb_ptr = NULL;
627 /* To next descriptor */
628 descptr = descptr->vndescp;
629 priv->tx_free_desc++;
630 }
631 priv->tx_remove_ptr = descptr;
632
633 if (priv->tx_free_desc)
634 netif_wake_queue(dev);
635 spin_unlock(&priv->lock);
636}
637
638static int r6040_poll(struct napi_struct *napi, int budget)
639{
640 struct r6040_private *priv =
641 container_of(napi, struct r6040_private, napi);
642 struct net_device *dev = priv->dev;
643 void __iomem *ioaddr = priv->base;
644 int work_done;
645
646 work_done = r6040_rx(dev, budget);
647
648 if (work_done < budget) {
649 napi_complete(napi);
650 /* Enable RX interrupt */
651 iowrite16(ioread16(ioaddr + MIER) | RX_INTS, ioaddr + MIER);
652 }
653 return work_done;
654}
655
656/* The RDC interrupt handler. */
657static irqreturn_t r6040_interrupt(int irq, void *dev_id)
658{
659 struct net_device *dev = dev_id;
660 struct r6040_private *lp = netdev_priv(dev);
661 void __iomem *ioaddr = lp->base;
662 u16 misr, status;
663
664 /* Save MIER */
665 misr = ioread16(ioaddr + MIER);
666 /* Mask off RDC MAC interrupt */
667 iowrite16(MSK_INT, ioaddr + MIER);
668 /* Read MISR status and clear */
669 status = ioread16(ioaddr + MISR);
670
671 if (status == 0x0000 || status == 0xffff) {
672 /* Restore RDC MAC interrupt */
673 iowrite16(misr, ioaddr + MIER);
674 return IRQ_NONE;
675 }
676
677 /* RX interrupt request */
678 if (status & RX_INTS) {
679 if (status & RX_NO_DESC) {
680 /* RX descriptor unavailable */
681 dev->stats.rx_dropped++;
682 dev->stats.rx_missed_errors++;
683 }
684 if (status & RX_FIFO_FULL)
685 dev->stats.rx_fifo_errors++;
686
687 if (likely(napi_schedule_prep(&lp->napi))) {
688 /* Mask off RX interrupt */
689 misr &= ~RX_INTS;
690 __napi_schedule(&lp->napi);
691 }
692 }
693
694 /* TX interrupt request */
695 if (status & TX_INTS)
696 r6040_tx(dev);
697
698 /* Restore RDC MAC interrupt */
699 iowrite16(misr, ioaddr + MIER);
700
701 return IRQ_HANDLED;
702}
703
704#ifdef CONFIG_NET_POLL_CONTROLLER
705static void r6040_poll_controller(struct net_device *dev)
706{
707 disable_irq(dev->irq);
708 r6040_interrupt(dev->irq, dev);
709 enable_irq(dev->irq);
710}
711#endif
712
713/* Init RDC MAC */
714static int r6040_up(struct net_device *dev)
715{
716 struct r6040_private *lp = netdev_priv(dev);
717 void __iomem *ioaddr = lp->base;
718 int ret;
719
720 /* Initialise and alloc RX/TX buffers */
721 r6040_init_txbufs(dev);
722 ret = r6040_alloc_rxbufs(dev);
723 if (ret)
724 return ret;
725
726 /* improve performance (by RDC guys) */
727 r6040_phy_write(ioaddr, 30, 17,
728 (r6040_phy_read(ioaddr, 30, 17) | 0x4000));
729 r6040_phy_write(ioaddr, 30, 17,
730 ~((~r6040_phy_read(ioaddr, 30, 17)) | 0x2000));
731 r6040_phy_write(ioaddr, 0, 19, 0x0000);
732 r6040_phy_write(ioaddr, 0, 30, 0x01F0);
733
734 /* Initialize all MAC registers */
735 r6040_init_mac_regs(dev);
736
737 phy_start(lp->phydev);
738
739 return 0;
740}
741
742
743/* Read/set MAC address routines */
744static void r6040_mac_address(struct net_device *dev)
745{
746 struct r6040_private *lp = netdev_priv(dev);
747 void __iomem *ioaddr = lp->base;
748 u16 *adrp;
749
750 /* Reset MAC */
751 r6040_reset_mac(lp);
752
753 /* Restore MAC Address */
754 adrp = (u16 *) dev->dev_addr;
755 iowrite16(adrp[0], ioaddr + MID_0L);
756 iowrite16(adrp[1], ioaddr + MID_0M);
757 iowrite16(adrp[2], ioaddr + MID_0H);
758
759 /* Store MAC Address in perm_addr */
760 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
761}
762
763static int r6040_open(struct net_device *dev)
764{
765 struct r6040_private *lp = netdev_priv(dev);
766 int ret;
767
768 /* Request IRQ and Register interrupt handler */
769 ret = request_irq(dev->irq, r6040_interrupt,
770 IRQF_SHARED, dev->name, dev);
771 if (ret)
772 goto out;
773
774 /* Set MAC address */
775 r6040_mac_address(dev);
776
777 /* Allocate Descriptor memory */
778 lp->rx_ring =
779 pci_alloc_consistent(lp->pdev, RX_DESC_SIZE, &lp->rx_ring_dma);
780 if (!lp->rx_ring) {
781 ret = -ENOMEM;
782 goto err_free_irq;
783 }
784
785 lp->tx_ring =
786 pci_alloc_consistent(lp->pdev, TX_DESC_SIZE, &lp->tx_ring_dma);
787 if (!lp->tx_ring) {
788 ret = -ENOMEM;
789 goto err_free_rx_ring;
790 }
791
792 ret = r6040_up(dev);
793 if (ret)
794 goto err_free_tx_ring;
795
796 napi_enable(&lp->napi);
797 netif_start_queue(dev);
798
799 return 0;
800
801err_free_tx_ring:
802 pci_free_consistent(lp->pdev, TX_DESC_SIZE, lp->tx_ring,
803 lp->tx_ring_dma);
804err_free_rx_ring:
805 pci_free_consistent(lp->pdev, RX_DESC_SIZE, lp->rx_ring,
806 lp->rx_ring_dma);
807err_free_irq:
808 free_irq(dev->irq, dev);
809out:
810 return ret;
811}
812
813static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
814 struct net_device *dev)
815{
816 struct r6040_private *lp = netdev_priv(dev);
817 struct r6040_descriptor *descptr;
818 void __iomem *ioaddr = lp->base;
819 unsigned long flags;
820
821 /* Critical Section */
822 spin_lock_irqsave(&lp->lock, flags);
823
824 /* TX resource check */
825 if (!lp->tx_free_desc) {
826 spin_unlock_irqrestore(&lp->lock, flags);
827 netif_stop_queue(dev);
828 netdev_err(dev, ": no tx descriptor\n");
829 return NETDEV_TX_BUSY;
830 }
831
832 /* Statistic Counter */
833 dev->stats.tx_packets++;
834 dev->stats.tx_bytes += skb->len;
835 /* Set TX descriptor & Transmit it */
836 lp->tx_free_desc--;
837 descptr = lp->tx_insert_ptr;
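/* Note: MISR (0x3C == 60) is reused below as the minimum Ethernet frame
 * length (without FCS); short frames get their descriptor length raised
 * to 60 bytes. */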
838 if (skb->len < MISR)
839 descptr->len = MISR;
840 else
841 descptr->len = skb->len;
842
843 descptr->skb_ptr = skb;
844 descptr->buf = cpu_to_le32(pci_map_single(lp->pdev,
845 skb->data, skb->len, PCI_DMA_TODEVICE));
846 descptr->status = DSC_OWNER_MAC;
847
848 skb_tx_timestamp(skb);
849
850 /* Trigger the MAC to check the TX descriptor */
851 iowrite16(TM2TX, ioaddr + MTPR);
852 lp->tx_insert_ptr = descptr->vndescp;
853
854 /* If no tx resource, stop */
855 if (!lp->tx_free_desc)
856 netif_stop_queue(dev);
857
858 spin_unlock_irqrestore(&lp->lock, flags);
859
860 return NETDEV_TX_OK;
861}
862
863static void r6040_multicast_list(struct net_device *dev)
864{
865 struct r6040_private *lp = netdev_priv(dev);
866 void __iomem *ioaddr = lp->base;
867 unsigned long flags;
868 struct netdev_hw_addr *ha;
869 int i;
870 u16 *adrp;
871 u16 hash_table[4] = { 0 };
872
873 spin_lock_irqsave(&lp->lock, flags);
874
875 /* Keep our MAC Address */
876 adrp = (u16 *)dev->dev_addr;
877 iowrite16(adrp[0], ioaddr + MID_0L);
878 iowrite16(adrp[1], ioaddr + MID_0M);
879 iowrite16(adrp[2], ioaddr + MID_0H);
880
881 /* Clear AMCP & PROM bits */
882 lp->mcr0 = ioread16(ioaddr + MCR0) & ~(MCR0_PROMISC | MCR0_HASH_EN);
883
884 /* Promiscuous mode */
885 if (dev->flags & IFF_PROMISC)
886 lp->mcr0 |= MCR0_PROMISC;
887
888 /* Enable multicast hash table function to
889 * receive all multicast packets. */
890 else if (dev->flags & IFF_ALLMULTI) {
891 lp->mcr0 |= MCR0_HASH_EN;
892
893 for (i = 0; i < MCAST_MAX ; i++) {
894 iowrite16(0, ioaddr + MID_1L + 8 * i);
895 iowrite16(0, ioaddr + MID_1M + 8 * i);
896 iowrite16(0, ioaddr + MID_1H + 8 * i);
897 }
898
899 for (i = 0; i < 4; i++)
900 hash_table[i] = 0xffff;
901 }
902 /* Use internal multicast address registers if the number of
903 * multicast addresses is not greater than MCAST_MAX. */
904 else if (netdev_mc_count(dev) <= MCAST_MAX) {
905 i = 0;
906 netdev_for_each_mc_addr(ha, dev) {
907 u16 *adrp = (u16 *) ha->addr;
908 iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
909 iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
910 iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
911 i++;
912 }
913 while (i < MCAST_MAX) {
914 iowrite16(0, ioaddr + MID_1L + 8 * i);
915 iowrite16(0, ioaddr + MID_1M + 8 * i);
916 iowrite16(0, ioaddr + MID_1H + 8 * i);
917 i++;
918 }
919 }
920 /* Otherwise, Enable multicast hash table function. */
921 else {
922 u32 crc;
923
924 lp->mcr0 |= MCR0_HASH_EN;
925
926 for (i = 0; i < MCAST_MAX ; i++) {
927 iowrite16(0, ioaddr + MID_1L + 8 * i);
928 iowrite16(0, ioaddr + MID_1M + 8 * i);
929 iowrite16(0, ioaddr + MID_1H + 8 * i);
930 }
931
932 /* Build multicast hash table */
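/* The top six bits of the CRC index one of 64 hash bits: bits 5:4 select
 * one of the four 16-bit MARx registers, bits 3:0 the bit within it. */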
933 netdev_for_each_mc_addr(ha, dev) {
934 u8 *addrs = ha->addr;
935
936 crc = ether_crc(ETH_ALEN, addrs);
937 crc >>= 26;
938 hash_table[crc >> 4] |= 1 << (crc & 0xf);
939 }
940 }
941
942 iowrite16(lp->mcr0, ioaddr + MCR0);
943
944 /* Fill the MAC hash tables with their values */
945 if (lp->mcr0 & MCR0_HASH_EN) {
946 iowrite16(hash_table[0], ioaddr + MAR0);
947 iowrite16(hash_table[1], ioaddr + MAR1);
948 iowrite16(hash_table[2], ioaddr + MAR2);
949 iowrite16(hash_table[3], ioaddr + MAR3);
950 }
951
952 spin_unlock_irqrestore(&lp->lock, flags);
953}
954
955static void netdev_get_drvinfo(struct net_device *dev,
956 struct ethtool_drvinfo *info)
957{
958 struct r6040_private *rp = netdev_priv(dev);
959
960 strcpy(info->driver, DRV_NAME);
961 strcpy(info->version, DRV_VERSION);
962 strcpy(info->bus_info, pci_name(rp->pdev));
963}
964
965static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
966{
967 struct r6040_private *rp = netdev_priv(dev);
968
969 return phy_ethtool_gset(rp->phydev, cmd);
970}
971
972static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
973{
974 struct r6040_private *rp = netdev_priv(dev);
975
976 return phy_ethtool_sset(rp->phydev, cmd);
977}
978
979static const struct ethtool_ops netdev_ethtool_ops = {
980 .get_drvinfo = netdev_get_drvinfo,
981 .get_settings = netdev_get_settings,
982 .set_settings = netdev_set_settings,
983 .get_link = ethtool_op_get_link,
984 .get_ts_info = ethtool_op_get_ts_info,
985};
986
987static const struct net_device_ops r6040_netdev_ops = {
988 .ndo_open = r6040_open,
989 .ndo_stop = r6040_close,
990 .ndo_start_xmit = r6040_start_xmit,
991 .ndo_get_stats = r6040_get_stats,
992 .ndo_set_rx_mode = r6040_multicast_list,
993 .ndo_change_mtu = eth_change_mtu,
994 .ndo_validate_addr = eth_validate_addr,
995 .ndo_set_mac_address = eth_mac_addr,
996 .ndo_do_ioctl = r6040_ioctl,
997 .ndo_tx_timeout = r6040_tx_timeout,
998#ifdef CONFIG_NET_POLL_CONTROLLER
999 .ndo_poll_controller = r6040_poll_controller,
1000#endif
1001};
1002
1003static void r6040_adjust_link(struct net_device *dev)
1004{
1005 struct r6040_private *lp = netdev_priv(dev);
1006 struct phy_device *phydev = lp->phydev;
1007 int status_changed = 0;
1008 void __iomem *ioaddr = lp->base;
1009
1010 BUG_ON(!phydev);
1011
1012 if (lp->old_link != phydev->link) {
1013 status_changed = 1;
1014 lp->old_link = phydev->link;
1015 }
1016
1017 /* reflect duplex change */
1018 if (phydev->link && (lp->old_duplex != phydev->duplex)) {
1019 lp->mcr0 |= (phydev->duplex == DUPLEX_FULL ? MCR0_FD : 0);
1020 iowrite16(lp->mcr0, ioaddr);
1021
1022 status_changed = 1;
1023 lp->old_duplex = phydev->duplex;
1024 }
1025
1026 if (status_changed) {
1027 pr_info("%s: link %s", dev->name, phydev->link ?
1028 "UP" : "DOWN");
1029 if (phydev->link)
1030 pr_cont(" - %d/%s", phydev->speed,
1031 DUPLEX_FULL == phydev->duplex ? "full" : "half");
1032 pr_cont("\n");
1033 }
1034}
1035
1036static int r6040_mii_probe(struct net_device *dev)
1037{
1038 struct r6040_private *lp = netdev_priv(dev);
1039 struct phy_device *phydev = NULL;
1040
1041 phydev = phy_find_first(lp->mii_bus);
1042 if (!phydev) {
1043 dev_err(&lp->pdev->dev, "no PHY found\n");
1044 return -ENODEV;
1045 }
1046
1047 phydev = phy_connect(dev, dev_name(&phydev->dev), &r6040_adjust_link,
1048 0, PHY_INTERFACE_MODE_MII);
1049
1050 if (IS_ERR(phydev)) {
1051 dev_err(&lp->pdev->dev, "could not attach to PHY\n");
1052 return PTR_ERR(phydev);
1053 }
1054
1055 /* mask with MAC supported features */
1056 phydev->supported &= (SUPPORTED_10baseT_Half
1057 | SUPPORTED_10baseT_Full
1058 | SUPPORTED_100baseT_Half
1059 | SUPPORTED_100baseT_Full
1060 | SUPPORTED_Autoneg
1061 | SUPPORTED_MII
1062 | SUPPORTED_TP);
1063
1064 phydev->advertising = phydev->supported;
1065 lp->phydev = phydev;
1066 lp->old_link = 0;
1067 lp->old_duplex = -1;
1068
1069 dev_info(&lp->pdev->dev, "attached PHY driver [%s] "
1070 "(mii_bus:phy_addr=%s)\n",
1071 phydev->drv->name, dev_name(&phydev->dev));
1072
1073 return 0;
1074}
1075
1076static int __devinit r6040_init_one(struct pci_dev *pdev,
1077 const struct pci_device_id *ent)
1078{
1079 struct net_device *dev;
1080 struct r6040_private *lp;
1081 void __iomem *ioaddr;
1082 int err, io_size = R6040_IO_SIZE;
1083 static int card_idx = -1;
1084 int bar = 0;
1085 u16 *adrp;
1086 int i;
1087
1088 pr_info("%s\n", version);
1089
1090 err = pci_enable_device(pdev);
1091 if (err)
1092 goto err_out;
1093
1094 /* this should always be supported */
1095 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1096 if (err) {
1097 dev_err(&pdev->dev, "32-bit PCI DMA addresses "
1098 "not supported by the card\n");
1099 goto err_out_disable_dev;
1100 }
1101 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1102 if (err) {
1103 dev_err(&pdev->dev, "32-bit PCI DMA addresses "
1104 "not supported by the card\n");
1105 goto err_out_disable_dev;
1106 }
1107
1108 /* IO Size check */
1109 if (pci_resource_len(pdev, bar) < io_size) {
1110 dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
1111 err = -EIO;
1112 goto err_out_disable_dev;
1113 }
1114
1115 pci_set_master(pdev);
1116
1117 dev = alloc_etherdev(sizeof(struct r6040_private));
1118 if (!dev) {
1119 err = -ENOMEM;
1120 goto err_out_disable_dev;
1121 }
1122 SET_NETDEV_DEV(dev, &pdev->dev);
1123 lp = netdev_priv(dev);
1124
1125 err = pci_request_regions(pdev, DRV_NAME);
1126
1127 if (err) {
1128 dev_err(&pdev->dev, "Failed to request PCI regions\n");
1129 goto err_out_free_dev;
1130 }
1131
1132 ioaddr = pci_iomap(pdev, bar, io_size);
1133 if (!ioaddr) {
1134 dev_err(&pdev->dev, "ioremap failed for device\n");
1135 err = -EIO;
1136 goto err_out_free_res;
1137 }
1138
1139 /* If PHY status change register is still set to zero it means the
1140 * bootloader didn't initialize it, so we set it to:
1141 * - enable phy status change
1142 * - enable all phy addresses
1143 * - set to lowest timer divider */
1144 if (ioread16(ioaddr + PHY_CC) == 0)
1145 iowrite16(SCEN | PHY_MAX_ADDR << PHYAD_SHIFT |
1146 7 << TMRDIV_SHIFT, ioaddr + PHY_CC);
1147
1148 /* Init system & device */
1149 lp->base = ioaddr;
1150 dev->irq = pdev->irq;
1151
1152 spin_lock_init(&lp->lock);
1153 pci_set_drvdata(pdev, dev);
1154
1155 /* Set MAC address */
1156 card_idx++;
1157
1158 adrp = (u16 *)dev->dev_addr;
1159 adrp[0] = ioread16(ioaddr + MID_0L);
1160 adrp[1] = ioread16(ioaddr + MID_0M);
1161 adrp[2] = ioread16(ioaddr + MID_0H);
1162
1163 /* Some bootloader/BIOSes do not initialize
1164 * MAC address, warn about that */
1165 if (!(adrp[0] || adrp[1] || adrp[2])) {
1166 netdev_warn(dev, "MAC address not initialized, "
1167 "generating random\n");
1168 eth_hw_addr_random(dev);
1169 }
1170
1171 /* Link new device into r6040_root_dev */
1172 lp->pdev = pdev;
1173 lp->dev = dev;
1174
1175 /* Init RDC private data */
1176 lp->mcr0 = MCR0_XMTEN | MCR0_RCVEN;
1177
1178 /* The RDC-specific entries in the device structure. */
1179 dev->netdev_ops = &r6040_netdev_ops;
1180 dev->ethtool_ops = &netdev_ethtool_ops;
1181 dev->watchdog_timeo = TX_TIMEOUT;
1182
1183 netif_napi_add(dev, &lp->napi, r6040_poll, 64);
1184
1185 lp->mii_bus = mdiobus_alloc();
1186 if (!lp->mii_bus) {
1187 dev_err(&pdev->dev, "mdiobus_alloc() failed\n");
1188 err = -ENOMEM;
1189 goto err_out_unmap;
1190 }
1191
1192 lp->mii_bus->priv = dev;
1193 lp->mii_bus->read = r6040_mdiobus_read;
1194 lp->mii_bus->write = r6040_mdiobus_write;
1195 lp->mii_bus->reset = r6040_mdiobus_reset;
1196 lp->mii_bus->name = "r6040_eth_mii";
1197 snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
1198 dev_name(&pdev->dev), card_idx);
1199 lp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
1200 if (!lp->mii_bus->irq) {
1201 dev_err(&pdev->dev, "mii_bus irq allocation failed\n");
1202 err = -ENOMEM;
1203 goto err_out_mdio;
1204 }
1205
1206 for (i = 0; i < PHY_MAX_ADDR; i++)
1207 lp->mii_bus->irq[i] = PHY_POLL;
1208
1209 err = mdiobus_register(lp->mii_bus);
1210 if (err) {
1211 dev_err(&pdev->dev, "failed to register MII bus\n");
1212 goto err_out_mdio_irq;
1213 }
1214
1215 err = r6040_mii_probe(dev);
1216 if (err) {
1217 dev_err(&pdev->dev, "failed to probe MII bus\n");
1218 goto err_out_mdio_unregister;
1219 }
1220
1221 /* Register net device. dev->name is assigned after this. */
1222 err = register_netdev(dev);
1223 if (err) {
1224 dev_err(&pdev->dev, "Failed to register net device\n");
1225 goto err_out_mdio_unregister;
1226 }
1227 return 0;
1228
1229err_out_mdio_unregister:
1230 mdiobus_unregister(lp->mii_bus);
1231err_out_mdio_irq:
1232 kfree(lp->mii_bus->irq);
1233err_out_mdio:
1234 mdiobus_free(lp->mii_bus);
1235err_out_unmap:
1236 netif_napi_del(&lp->napi);
1237 pci_set_drvdata(pdev, NULL);
1238 pci_iounmap(pdev, ioaddr);
1239err_out_free_res:
1240 pci_release_regions(pdev);
1241err_out_free_dev:
1242 free_netdev(dev);
1243err_out_disable_dev:
1244 pci_disable_device(pdev);
1245err_out:
1246 return err;
1247}
1248
1249static void __devexit r6040_remove_one(struct pci_dev *pdev)
1250{
1251 struct net_device *dev = pci_get_drvdata(pdev);
1252 struct r6040_private *lp = netdev_priv(dev);
1253
1254 unregister_netdev(dev);
1255 mdiobus_unregister(lp->mii_bus);
1256 kfree(lp->mii_bus->irq);
1257 mdiobus_free(lp->mii_bus);
1258 netif_napi_del(&lp->napi);
1259 pci_set_drvdata(pdev, NULL);
1260 pci_iounmap(pdev, lp->base);
1261 pci_release_regions(pdev);
1262 free_netdev(dev);
1263 pci_disable_device(pdev);
1264 pci_set_drvdata(pdev, NULL);
1265}
1266
1267
1268static DEFINE_PCI_DEVICE_TABLE(r6040_pci_tbl) = {
1269 { PCI_DEVICE(PCI_VENDOR_ID_RDC, 0x6040) },
1270 { 0 }
1271};
1272MODULE_DEVICE_TABLE(pci, r6040_pci_tbl);
1273
1274static struct pci_driver r6040_driver = {
1275 .name = DRV_NAME,
1276 .id_table = r6040_pci_tbl,
1277 .probe = r6040_init_one,
1278 .remove = __devexit_p(r6040_remove_one),
1279};
1280
1281
1282static int __init r6040_init(void)
1283{
1284 return pci_register_driver(&r6040_driver);
1285}
1286
1287
1288static void __exit r6040_cleanup(void)
1289{
1290 pci_unregister_driver(&r6040_driver);
1291}
1292
1293module_init(r6040_init);
1294module_exit(r6040_cleanup);