1 /* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
2 /*
3 Written 1999-2000 by Donald Becker.
4
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
11
12 The author may be reached as becker@scyld.com, or C/O
13 Scyld Computing Corporation
14 410 Severn Ave., Suite 210
15 Annapolis MD 21403
16
17 Support and updates available at
18 http://www.scyld.com/network/sundance.html
19 [link no longer provides useful info -jgarzik]
20 Archives of the mailing list are still available at
21 http://www.beowulf.org/pipermail/netdrivers/
22
23 */
24
25 #define DRV_NAME "sundance"
26 #define DRV_VERSION "1.2"
27 #define DRV_RELDATE "11-Sep-2006"
28
29
30 /* The user-configurable values.
31 These may be modified when a driver module is loaded.*/
32 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
33 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
34 Typical is a 64 element hash table based on the Ethernet CRC. */
35 static const int multicast_filter_limit = 32;
36
37 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
38 Setting to > 1518 effectively disables this feature.
39 This chip can receive into offset buffers, so the Alpha does not
40 need a copy-align. */
41 static int rx_copybreak;
42 static int flowctrl=1;
43
44 /* media[] specifies the media type the NIC operates at.
45 autosense Autosensing active media.
46 10mbps_hd 10Mbps half duplex.
47 10mbps_fd 10Mbps full duplex.
48 100mbps_hd 100Mbps half duplex.
49 100mbps_fd 100Mbps full duplex.
50 0 Autosensing active media.
51 1 10Mbps half duplex.
52 2 10Mbps full duplex.
53 3 100Mbps half duplex.
54 4 100Mbps full duplex.
55 */
56 #define MAX_UNITS 8
57 static char *media[MAX_UNITS];
58
59
60 /* Operational parameters that are set at compile time. */
61
62 /* Keep the ring sizes a power of two for compile efficiency.
63 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
64 Making the Tx ring too large decreases the effectiveness of channel
65 bonding and packet priority, and more than 128 requires modifying the
66 Tx error recovery.
67 Large receive rings merely waste memory. */
68 #define TX_RING_SIZE 32
69 #define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */
70 #define RX_RING_SIZE 64
71 #define RX_BUDGET 32
72 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
73 #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc)
74
75 /* Operational parameters that usually are not changed. */
76 /* Time in jiffies before concluding the transmitter is hung. */
77 #define TX_TIMEOUT (4*HZ)
78 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
79
80 /* Include files, designed to support most kernel versions 2.0.0 and later. */
81 #include <linux/module.h>
82 #include <linux/kernel.h>
83 #include <linux/string.h>
84 #include <linux/timer.h>
85 #include <linux/errno.h>
86 #include <linux/ioport.h>
87 #include <linux/slab.h>
88 #include <linux/interrupt.h>
89 #include <linux/pci.h>
90 #include <linux/netdevice.h>
91 #include <linux/etherdevice.h>
92 #include <linux/skbuff.h>
93 #include <linux/init.h>
94 #include <linux/bitops.h>
95 #include <asm/uaccess.h>
96 #include <asm/processor.h> /* Processor type for cache alignment. */
97 #include <asm/io.h>
98 #include <linux/delay.h>
99 #include <linux/spinlock.h>
100 #ifndef _COMPAT_WITH_OLD_KERNEL
101 #include <linux/crc32.h>
102 #include <linux/ethtool.h>
103 #include <linux/mii.h>
104 #else
105 #include "crc32.h"
106 #include "ethtool.h"
107 #include "mii.h"
108 #include "compat.h"
109 #endif
110
111 /* These identify the driver base version and may not be removed. */
112 static char version[] =
113 KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n"
114 KERN_INFO " http://www.scyld.com/network/sundance.html\n";
115
116 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
117 MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
118 MODULE_LICENSE("GPL");
119
120 module_param(debug, int, 0);
121 module_param(rx_copybreak, int, 0);
122 module_param_array(media, charp, NULL, 0);
123 module_param(flowctrl, int, 0);
124 MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
125 MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
126 MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
127
128 /*
129 Theory of Operation
130
131 I. Board Compatibility
132
133 This driver is designed for the Sundance Technologies "Alta" ST201 chip.
134
135 II. Board-specific settings
136
137 III. Driver operation
138
139 IIIa. Ring buffers
140
141 This driver uses two statically allocated fixed-size descriptor lists
142 formed into rings by a branch from the final descriptor to the beginning of
143 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
144 Some chips explicitly use only 2^N sized rings, while others use a
145 'next descriptor' pointer that the driver forms into rings.
146
147 IIIb/c. Transmit/Receive Structure
148
149 This driver uses a zero-copy receive and transmit scheme.
150 The driver allocates full frame size skbuffs for the Rx ring buffers at
151 open() time and passes the skb->data field to the chip as receive data
152 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
153 a fresh skbuff is allocated and the frame is copied to the new skbuff.
154 When the incoming frame is larger, the skbuff is passed directly up the
155 protocol stack. Buffers consumed this way are replaced by newly allocated
156 skbuffs in a later phase of receives.
157
158 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
159 using a full-sized skbuff for small frames vs. the copying costs of larger
160 frames. New boards are typically used in generously configured machines
161 and the underfilled buffers have negligible impact compared to the benefit of
162 a single allocation size, so the default value of zero results in never
163 copying packets. When copying is done, the cost is usually mitigated by using
164 a combined copy/checksum routine. Copying also preloads the cache, which is
165 most useful with small frames.
166
167 A subtle aspect of the operation is that the IP header at offset 14 in an
168 ethernet frame isn't longword aligned for further processing.
169 Unaligned buffers are permitted by the Sundance hardware, so
170 frames are received into the skbuff at an offset of "+2", 16-byte aligning
171 the IP header.
172
173 IIId. Synchronization
174
175 The driver runs as two independent, single-threaded flows of control. One
176 is the send-packet routine, which enforces single-threaded use by the
177 dev->tbusy flag. The other thread is the interrupt handler, which is single
178 threaded by the hardware and interrupt handling software.
179
180 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
181 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
182 queue slot is empty, it clears the tbusy flag when finished otherwise it sets
183 the 'lp->tx_full' flag.
184
185 The interrupt handler has exclusive control over the Rx ring and records stats
186 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
187 empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
188 clears both the tx_full and tbusy flags.
189
190 IV. Notes
191
192 IVb. References
193
194 The Sundance ST201 datasheet, preliminary version.
195 The Kendin KS8723 datasheet, preliminary version.
196 The ICplus IP100 datasheet, preliminary version.
197 http://www.scyld.com/expert/100mbps.html
198 http://www.scyld.com/expert/NWay.html
199
200 IVc. Errata
201
202 */
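/*
 * Illustrative sketch only (kept out of the build): the RX_COPYBREAK
 * decision described above, in the shape rx_poll() below gives it.
 * Frames shorter than rx_copybreak are copied into a freshly allocated
 * skbuff so the full-sized ring buffer can be reused; larger frames hand
 * the ring skbuff itself up the protocol stack and the slot is refilled
 * later by refill_rx().
 */
#if 0
	if (pkt_len < rx_copybreak &&
	    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);		/* 16-byte align the IP header */
		/* copy the frame out of the ring buffer, keep the ring skbuff */
	} else {
		skb = np->rx_skbuff[entry];	/* pass the ring skbuff upstream */
		np->rx_skbuff[entry] = NULL;	/* refill_rx() replaces it */
	}
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
#endif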
203
204 /* Work-around for Kendin chip bugs. */
205 #ifndef CONFIG_SUNDANCE_MMIO
206 #define USE_IO_OPS 1
207 #endif
208
209 static const struct pci_device_id sundance_pci_tbl[] = {
210 { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
211 { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
212 { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
213 { 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
214 { 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
215 { 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
216 { 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
217 { }
218 };
219 MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
220
221 enum {
222 netdev_io_size = 128
223 };
224
225 struct pci_id_info {
226 const char *name;
227 };
228 static const struct pci_id_info pci_id_tbl[] __devinitdata = {
229 {"D-Link DFE-550TX FAST Ethernet Adapter"},
230 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
231 {"D-Link DFE-580TX 4 port Server Adapter"},
232 {"D-Link DFE-530TXS FAST Ethernet Adapter"},
233 {"D-Link DL10050-based FAST Ethernet Adapter"},
234 {"Sundance Technology Alta"},
235 {"IC Plus Corporation IP100A FAST Ethernet Adapter"},
236 { } /* terminate list. */
237 };
238
239 /* This driver was written to use PCI memory space, however x86-oriented
240 hardware often uses I/O space accesses. */
241
242 /* Offsets to the device registers.
243 Unlike software-only systems, device drivers interact with complex hardware.
244 It's not useful to define symbolic names for every register bit in the
245 device. The name can only partially document the semantics and make
246 the driver longer and more difficult to read.
247 In general, only the important configuration values or bits changed
248 multiple times should be defined symbolically.
249 */
250 enum alta_offsets {
251 DMACtrl = 0x00,
252 TxListPtr = 0x04,
253 TxDMABurstThresh = 0x08,
254 TxDMAUrgentThresh = 0x09,
255 TxDMAPollPeriod = 0x0a,
256 RxDMAStatus = 0x0c,
257 RxListPtr = 0x10,
258 DebugCtrl0 = 0x1a,
259 DebugCtrl1 = 0x1c,
260 RxDMABurstThresh = 0x14,
261 RxDMAUrgentThresh = 0x15,
262 RxDMAPollPeriod = 0x16,
263 LEDCtrl = 0x1a,
264 ASICCtrl = 0x30,
265 EEData = 0x34,
266 EECtrl = 0x36,
267 FlashAddr = 0x40,
268 FlashData = 0x44,
269 TxStatus = 0x46,
270 TxFrameId = 0x47,
271 DownCounter = 0x18,
272 IntrClear = 0x4a,
273 IntrEnable = 0x4c,
274 IntrStatus = 0x4e,
275 MACCtrl0 = 0x50,
276 MACCtrl1 = 0x52,
277 StationAddr = 0x54,
278 MaxFrameSize = 0x5A,
279 RxMode = 0x5c,
280 MIICtrl = 0x5e,
281 MulticastFilter0 = 0x60,
282 MulticastFilter1 = 0x64,
283 RxOctetsLow = 0x68,
284 RxOctetsHigh = 0x6a,
285 TxOctetsLow = 0x6c,
286 TxOctetsHigh = 0x6e,
287 TxFramesOK = 0x70,
288 RxFramesOK = 0x72,
289 StatsCarrierError = 0x74,
290 StatsLateColl = 0x75,
291 StatsMultiColl = 0x76,
292 StatsOneColl = 0x77,
293 StatsTxDefer = 0x78,
294 RxMissed = 0x79,
295 StatsTxXSDefer = 0x7a,
296 StatsTxAbort = 0x7b,
297 StatsBcastTx = 0x7c,
298 StatsBcastRx = 0x7d,
299 StatsMcastTx = 0x7e,
300 StatsMcastRx = 0x7f,
301 /* Aliased and bogus values! */
302 RxStatus = 0x0c,
303 };
304 enum ASICCtrl_HiWord_bit {
305 GlobalReset = 0x0001,
306 RxReset = 0x0002,
307 TxReset = 0x0004,
308 DMAReset = 0x0008,
309 FIFOReset = 0x0010,
310 NetworkReset = 0x0020,
311 HostReset = 0x0040,
312 ResetBusy = 0x0400,
313 };
314
315 /* Bits in the interrupt status/mask registers. */
316 enum intr_status_bits {
317 IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
318 IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
319 IntrDrvRqst=0x0040,
320 StatsMax=0x0080, LinkChange=0x0100,
321 IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
322 };
323
324 /* Bits in the RxMode register. */
325 enum rx_mode_bits {
326 AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
327 AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
328 };
329 /* Bits in MACCtrl. */
330 enum mac_ctrl0_bits {
331 EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
332 EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
333 };
334 enum mac_ctrl1_bits {
335 StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
336 TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
337 RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
338 };
339
340 /* The Rx and Tx buffer descriptors. */
341 /* Note that using only 32 bit fields simplifies conversion to big-endian
342 architectures. */
343 struct netdev_desc {
344 u32 next_desc;
345 u32 status;
346 struct desc_frag { u32 addr, length; } frag[1];
347 };
348
349 /* Bits in netdev_desc.status */
350 enum desc_status_bits {
351 DescOwn=0x8000,
352 DescEndPacket=0x4000,
353 DescEndRing=0x2000,
354 LastFrag=0x80000000,
355 DescIntrOnTx=0x8000,
356 DescIntrOnDMADone=0x80000000,
357 DisableAlign = 0x00000001,
358 };
359
360 #define PRIV_ALIGN 15 /* Required alignment mask */
361 /* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
362 within the structure. */
363 #define MII_CNT 4
364 struct netdev_private {
365 /* Descriptor rings first for alignment. */
366 struct netdev_desc *rx_ring;
367 struct netdev_desc *tx_ring;
368 struct sk_buff* rx_skbuff[RX_RING_SIZE];
369 struct sk_buff* tx_skbuff[TX_RING_SIZE];
370 dma_addr_t tx_ring_dma;
371 dma_addr_t rx_ring_dma;
372 struct net_device_stats stats;
373 struct timer_list timer; /* Media monitoring timer. */
374 /* Frequently used values: keep some adjacent for cache effect. */
375 spinlock_t lock;
376 spinlock_t rx_lock; /* Group with Tx control cache line. */
377 int msg_enable;
378 int chip_id;
379 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
380 unsigned int rx_buf_sz; /* Based on MTU+slack. */
381 struct netdev_desc *last_tx; /* Last Tx descriptor used. */
382 unsigned int cur_tx, dirty_tx;
 383 	/* These values keep track of the transceiver/media in use. */
384 unsigned int flowctrl:1;
385 unsigned int default_port:4; /* Last dev->if_port value. */
386 unsigned int an_enable:1;
387 unsigned int speed;
388 struct tasklet_struct rx_tasklet;
389 struct tasklet_struct tx_tasklet;
390 int budget;
391 int cur_task;
392 /* Multicast and receive mode. */
393 spinlock_t mcastlock; /* SMP lock multicast updates. */
394 u16 mcast_filter[4];
395 /* MII transceiver section. */
396 struct mii_if_info mii_if;
397 int mii_preamble_required;
398 unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
399 struct pci_dev *pci_dev;
400 void __iomem *base;
401 unsigned char pci_rev_id;
402 };
403
404 /* The station address location in the EEPROM. */
405 #define EEPROM_SA_OFFSET 0x10
406 #define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
407 IntrDrvRqst | IntrTxDone | StatsMax | \
408 LinkChange)
409
410 static int change_mtu(struct net_device *dev, int new_mtu);
411 static int eeprom_read(void __iomem *ioaddr, int location);
412 static int mdio_read(struct net_device *dev, int phy_id, int location);
413 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
414 static int netdev_open(struct net_device *dev);
415 static void check_duplex(struct net_device *dev);
416 static void netdev_timer(unsigned long data);
417 static void tx_timeout(struct net_device *dev);
418 static void init_ring(struct net_device *dev);
419 static int start_tx(struct sk_buff *skb, struct net_device *dev);
420 static int reset_tx (struct net_device *dev);
421 static irqreturn_t intr_handler(int irq, void *dev_instance);
422 static void rx_poll(unsigned long data);
423 static void tx_poll(unsigned long data);
424 static void refill_rx (struct net_device *dev);
425 static void netdev_error(struct net_device *dev, int intr_status);
427 static void set_rx_mode(struct net_device *dev);
428 static int __set_mac_addr(struct net_device *dev);
429 static struct net_device_stats *get_stats(struct net_device *dev);
430 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
431 static int netdev_close(struct net_device *dev);
432 static const struct ethtool_ops ethtool_ops;
433
434 static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
435 {
436 struct netdev_private *np = netdev_priv(dev);
437 void __iomem *ioaddr = np->base + ASICCtrl;
438 int countdown;
439
440 /* ST201 documentation states ASICCtrl is a 32bit register */
441 iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
442 /* ST201 documentation states reset can take up to 1 ms */
443 countdown = 10 + 1;
444 while (ioread32 (ioaddr) & (ResetBusy << 16)) {
445 if (--countdown == 0) {
446 printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
447 break;
448 }
449 udelay(100);
450 }
451 }
452
453 static int __devinit sundance_probe1 (struct pci_dev *pdev,
454 const struct pci_device_id *ent)
455 {
456 struct net_device *dev;
457 struct netdev_private *np;
458 static int card_idx;
459 int chip_idx = ent->driver_data;
460 int irq;
461 int i;
462 void __iomem *ioaddr;
463 u16 mii_ctl;
464 void *ring_space;
465 dma_addr_t ring_dma;
466 #ifdef USE_IO_OPS
467 int bar = 0;
468 #else
469 int bar = 1;
470 #endif
471 int phy, phy_idx = 0;
472
473
474 /* when built into the kernel, we only print version if device is found */
475 #ifndef MODULE
476 static int printed_version;
477 if (!printed_version++)
478 printk(version);
479 #endif
480
481 if (pci_enable_device(pdev))
482 return -EIO;
483 pci_set_master(pdev);
484
485 irq = pdev->irq;
486
487 dev = alloc_etherdev(sizeof(*np));
488 if (!dev)
489 return -ENOMEM;
490 SET_MODULE_OWNER(dev);
491 SET_NETDEV_DEV(dev, &pdev->dev);
492
493 if (pci_request_regions(pdev, DRV_NAME))
494 goto err_out_netdev;
495
496 ioaddr = pci_iomap(pdev, bar, netdev_io_size);
497 if (!ioaddr)
498 goto err_out_res;
499
500 for (i = 0; i < 3; i++)
501 ((u16 *)dev->dev_addr)[i] =
502 le16_to_cpu(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
503 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
504
505 dev->base_addr = (unsigned long)ioaddr;
506 dev->irq = irq;
507
508 np = netdev_priv(dev);
509 np->base = ioaddr;
510 np->pci_dev = pdev;
511 np->chip_id = chip_idx;
512 np->msg_enable = (1 << debug) - 1;
513 spin_lock_init(&np->lock);
514 tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
515 tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
516
517 ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
518 if (!ring_space)
519 goto err_out_cleardev;
520 np->tx_ring = (struct netdev_desc *)ring_space;
521 np->tx_ring_dma = ring_dma;
522
523 ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
524 if (!ring_space)
525 goto err_out_unmap_tx;
526 np->rx_ring = (struct netdev_desc *)ring_space;
527 np->rx_ring_dma = ring_dma;
528
529 np->mii_if.dev = dev;
530 np->mii_if.mdio_read = mdio_read;
531 np->mii_if.mdio_write = mdio_write;
532 np->mii_if.phy_id_mask = 0x1f;
533 np->mii_if.reg_num_mask = 0x1f;
534
535 /* The chip-specific entries in the device structure. */
536 dev->open = &netdev_open;
537 dev->hard_start_xmit = &start_tx;
538 dev->stop = &netdev_close;
539 dev->get_stats = &get_stats;
540 dev->set_multicast_list = &set_rx_mode;
541 dev->do_ioctl = &netdev_ioctl;
542 SET_ETHTOOL_OPS(dev, &ethtool_ops);
543 dev->tx_timeout = &tx_timeout;
544 dev->watchdog_timeo = TX_TIMEOUT;
545 dev->change_mtu = &change_mtu;
546 pci_set_drvdata(pdev, dev);
547
548 pci_read_config_byte(pdev, PCI_REVISION_ID, &np->pci_rev_id);
549
550 i = register_netdev(dev);
551 if (i)
552 goto err_out_unmap_rx;
553
554 printk(KERN_INFO "%s: %s at %p, ",
555 dev->name, pci_id_tbl[chip_idx].name, ioaddr);
556 for (i = 0; i < 5; i++)
557 printk("%2.2x:", dev->dev_addr[i]);
558 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
559
560 np->phys[0] = 1; /* Default setting */
561 np->mii_preamble_required++;
562 /*
 563 	 * It seems some PHYs don't deal well with address 0 being accessed
564 * first, so leave address zero to the end of the loop (32 & 31).
565 */
566 for (phy = 1; phy <= 32 && phy_idx < MII_CNT; phy++) {
567 int phyx = phy & 0x1f;
568 int mii_status = mdio_read(dev, phyx, MII_BMSR);
569 if (mii_status != 0xffff && mii_status != 0x0000) {
570 np->phys[phy_idx++] = phyx;
571 np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
572 if ((mii_status & 0x0040) == 0)
573 np->mii_preamble_required++;
574 printk(KERN_INFO "%s: MII PHY found at address %d, status "
575 "0x%4.4x advertising %4.4x.\n",
576 dev->name, phyx, mii_status, np->mii_if.advertising);
577 }
578 }
579 np->mii_preamble_required--;
580
581 if (phy_idx == 0) {
582 printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n",
583 dev->name, ioread32(ioaddr + ASICCtrl));
584 goto err_out_unregister;
585 }
586
587 np->mii_if.phy_id = np->phys[0];
588
589 /* Parse override configuration */
590 np->an_enable = 1;
591 if (card_idx < MAX_UNITS) {
592 if (media[card_idx] != NULL) {
593 np->an_enable = 0;
594 if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
595 strcmp (media[card_idx], "4") == 0) {
596 np->speed = 100;
597 np->mii_if.full_duplex = 1;
598 } else if (strcmp (media[card_idx], "100mbps_hd") == 0
599 || strcmp (media[card_idx], "3") == 0) {
600 np->speed = 100;
601 np->mii_if.full_duplex = 0;
602 } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
603 strcmp (media[card_idx], "2") == 0) {
604 np->speed = 10;
605 np->mii_if.full_duplex = 1;
606 } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
607 strcmp (media[card_idx], "1") == 0) {
608 np->speed = 10;
609 np->mii_if.full_duplex = 0;
610 } else {
611 np->an_enable = 1;
612 }
613 }
614 if (flowctrl == 1)
615 np->flowctrl = 1;
616 }
617
618 /* Fibre PHY? */
619 if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
620 /* Default 100Mbps Full */
621 if (np->an_enable) {
622 np->speed = 100;
623 np->mii_if.full_duplex = 1;
624 np->an_enable = 0;
625 }
626 }
627 /* Reset PHY */
628 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
629 mdelay (300);
630 /* If flow control enabled, we need to advertise it.*/
631 if (np->flowctrl)
632 mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
633 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
634 /* Force media type */
635 if (!np->an_enable) {
636 mii_ctl = 0;
637 mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
638 mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
639 mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
640 printk (KERN_INFO "Override speed=%d, %s duplex\n",
641 np->speed, np->mii_if.full_duplex ? "Full" : "Half");
642
643 }
644
645 /* Perhaps move the reset here? */
646 /* Reset the chip to erase previous misconfiguration. */
647 if (netif_msg_hw(np))
648 printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
649 sundance_reset(dev, 0x00ff << 16);
650 if (netif_msg_hw(np))
651 printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
652
653 card_idx++;
654 return 0;
655
656 err_out_unregister:
657 unregister_netdev(dev);
658 err_out_unmap_rx:
659 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
660 err_out_unmap_tx:
661 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
662 err_out_cleardev:
663 pci_set_drvdata(pdev, NULL);
664 pci_iounmap(pdev, ioaddr);
665 err_out_res:
666 pci_release_regions(pdev);
667 err_out_netdev:
668 free_netdev (dev);
669 return -ENODEV;
670 }
671
672 static int change_mtu(struct net_device *dev, int new_mtu)
673 {
674 if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
675 return -EINVAL;
676 if (netif_running(dev))
677 return -EBUSY;
678 dev->mtu = new_mtu;
679 return 0;
680 }
681
682 #define eeprom_delay(ee_addr) ioread32(ee_addr)
683 /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
684 static int __devinit eeprom_read(void __iomem *ioaddr, int location)
685 {
686 int boguscnt = 10000; /* Typical 1900 ticks. */
687 iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
688 do {
689 eeprom_delay(ioaddr + EECtrl);
690 if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
691 return ioread16(ioaddr + EEData);
692 }
693 } while (--boguscnt > 0);
694 return 0;
695 }
696
697 /* MII transceiver control section.
698 Read and write the MII registers using software-generated serial
699 MDIO protocol. See the MII specifications or DP83840A data sheet
700 for details.
701
 702 	The maximum data clock rate is 2.5 MHz. The minimum timing is usually
 703 	met by back-to-back 33 MHz PCI cycles. */
704 #define mdio_delay() ioread8(mdio_addr)
705
706 enum mii_reg_bits {
707 MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
708 };
709 #define MDIO_EnbIn (0)
710 #define MDIO_WRITE0 (MDIO_EnbOutput)
711 #define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
712
713 /* Generate the preamble required for initial synchronization and
714 a few older transceivers. */
715 static void mdio_sync(void __iomem *mdio_addr)
716 {
717 int bits = 32;
718
719 /* Establish sync by sending at least 32 logic ones. */
720 while (--bits >= 0) {
721 iowrite8(MDIO_WRITE1, mdio_addr);
722 mdio_delay();
723 iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
724 mdio_delay();
725 }
726 }
727
728 static int mdio_read(struct net_device *dev, int phy_id, int location)
729 {
730 struct netdev_private *np = netdev_priv(dev);
731 void __iomem *mdio_addr = np->base + MIICtrl;
732 int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
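	/* The 16 command bits shifted out below form the IEEE 802.3 MDIO read
	   frame: two extra preamble 1s, the 01 start and 10 read-opcode fields,
	   then the 5-bit PHY and register addresses; the turnaround and data
	   bits are read back in the second loop. */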
733 int i, retval = 0;
734
735 if (np->mii_preamble_required)
736 mdio_sync(mdio_addr);
737
738 /* Shift the read command bits out. */
739 for (i = 15; i >= 0; i--) {
740 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
741
742 iowrite8(dataval, mdio_addr);
743 mdio_delay();
744 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
745 mdio_delay();
746 }
747 /* Read the two transition, 16 data, and wire-idle bits. */
748 for (i = 19; i > 0; i--) {
749 iowrite8(MDIO_EnbIn, mdio_addr);
750 mdio_delay();
751 retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
752 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
753 mdio_delay();
754 }
755 return (retval>>1) & 0xffff;
756 }
757
758 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
759 {
760 struct netdev_private *np = netdev_priv(dev);
761 void __iomem *mdio_addr = np->base + MIICtrl;
762 int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
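	/* 32-bit MDIO write frame: 01 start, 01 write opcode, 5-bit PHY and
	   register addresses, the 10 turnaround pattern, then 16 data bits
	   (0x5002 << 16 supplies the start, opcode and turnaround fields). */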
763 int i;
764
765 if (np->mii_preamble_required)
766 mdio_sync(mdio_addr);
767
768 /* Shift the command bits out. */
769 for (i = 31; i >= 0; i--) {
770 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
771
772 iowrite8(dataval, mdio_addr);
773 mdio_delay();
774 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
775 mdio_delay();
776 }
777 /* Clear out extra bits. */
778 for (i = 2; i > 0; i--) {
779 iowrite8(MDIO_EnbIn, mdio_addr);
780 mdio_delay();
781 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
782 mdio_delay();
783 }
784 return;
785 }
786
787 static int netdev_open(struct net_device *dev)
788 {
789 struct netdev_private *np = netdev_priv(dev);
790 void __iomem *ioaddr = np->base;
791 unsigned long flags;
792 int i;
793
794 /* Do we need to reset the chip??? */
795
796 i = request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev);
797 if (i)
798 return i;
799
800 if (netif_msg_ifup(np))
801 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
802 dev->name, dev->irq);
803 init_ring(dev);
804
805 iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
806 /* The Tx list pointer is written as packets are queued. */
807
808 /* Initialize other registers. */
809 __set_mac_addr(dev);
810 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
811 iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
812 #else
813 iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
814 #endif
815 if (dev->mtu > 2047)
816 iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
817
818 /* Configure the PCI bus bursts and FIFO thresholds. */
819
820 if (dev->if_port == 0)
821 dev->if_port = np->default_port;
822
823 spin_lock_init(&np->mcastlock);
824
825 set_rx_mode(dev);
826 iowrite16(0, ioaddr + IntrEnable);
827 iowrite16(0, ioaddr + DownCounter);
828 /* Set the chip to poll every N*320nsec. */
829 iowrite8(100, ioaddr + RxDMAPollPeriod);
830 iowrite8(127, ioaddr + TxDMAPollPeriod);
831 /* Fix DFE-580TX packet drop issue */
832 if (np->pci_rev_id >= 0x14)
833 iowrite8(0x01, ioaddr + DebugCtrl1);
834 netif_start_queue(dev);
835
836 spin_lock_irqsave(&np->lock, flags);
837 reset_tx(dev);
838 spin_unlock_irqrestore(&np->lock, flags);
839
840 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
841
842 if (netif_msg_ifup(np))
843 printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
844 "MAC Control %x, %4.4x %4.4x.\n",
845 dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
846 ioread32(ioaddr + MACCtrl0),
847 ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
848
849 /* Set the timer to check for link beat. */
850 init_timer(&np->timer);
851 np->timer.expires = jiffies + 3*HZ;
852 np->timer.data = (unsigned long)dev;
853 np->timer.function = &netdev_timer; /* timer handler */
854 add_timer(&np->timer);
855
856 /* Enable interrupts by setting the interrupt mask. */
857 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
858
859 return 0;
860 }
861
862 static void check_duplex(struct net_device *dev)
863 {
864 struct netdev_private *np = netdev_priv(dev);
865 void __iomem *ioaddr = np->base;
866 int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
867 int negotiated = mii_lpa & np->mii_if.advertising;
868 int duplex;
869
870 /* Force media */
871 if (!np->an_enable || mii_lpa == 0xffff) {
872 if (np->mii_if.full_duplex)
873 iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
874 ioaddr + MACCtrl0);
875 return;
876 }
877
878 /* Autonegotiation */
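	/* Full duplex if 100FULL (0x0100) was negotiated, or if 10FULL (0x0040)
	   was negotiated with neither 100 Mbps mode (mask 0x01C0). */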
879 duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
880 if (np->mii_if.full_duplex != duplex) {
881 np->mii_if.full_duplex = duplex;
882 if (netif_msg_link(np))
883 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
884 "negotiated capability %4.4x.\n", dev->name,
885 duplex ? "full" : "half", np->phys[0], negotiated);
 886 		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
887 }
888 }
889
890 static void netdev_timer(unsigned long data)
891 {
892 struct net_device *dev = (struct net_device *)data;
893 struct netdev_private *np = netdev_priv(dev);
894 void __iomem *ioaddr = np->base;
895 int next_tick = 10*HZ;
896
897 if (netif_msg_timer(np)) {
898 printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
899 "Tx %x Rx %x.\n",
900 dev->name, ioread16(ioaddr + IntrEnable),
901 ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
902 }
903 check_duplex(dev);
904 np->timer.expires = jiffies + next_tick;
905 add_timer(&np->timer);
906 }
907
908 static void tx_timeout(struct net_device *dev)
909 {
910 struct netdev_private *np = netdev_priv(dev);
911 void __iomem *ioaddr = np->base;
912 unsigned long flag;
913
914 netif_stop_queue(dev);
915 tasklet_disable(&np->tx_tasklet);
916 iowrite16(0, ioaddr + IntrEnable);
917 printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
918 "TxFrameId %2.2x,"
919 " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
920 ioread8(ioaddr + TxFrameId));
921
922 {
923 int i;
924 for (i=0; i<TX_RING_SIZE; i++) {
925 printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
926 (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
927 le32_to_cpu(np->tx_ring[i].next_desc),
928 le32_to_cpu(np->tx_ring[i].status),
929 (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
930 le32_to_cpu(np->tx_ring[i].frag[0].addr),
931 le32_to_cpu(np->tx_ring[i].frag[0].length));
932 }
933 printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
934 ioread32(np->base + TxListPtr),
935 netif_queue_stopped(dev));
936 printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
937 np->cur_tx, np->cur_tx % TX_RING_SIZE,
938 np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
939 printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
940 printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
941 }
942 spin_lock_irqsave(&np->lock, flag);
943
 944 	/* Stop and restart the chip's Tx processes. */
945 reset_tx(dev);
946 spin_unlock_irqrestore(&np->lock, flag);
947
948 dev->if_port = 0;
949
950 dev->trans_start = jiffies;
951 np->stats.tx_errors++;
952 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
953 netif_wake_queue(dev);
954 }
955 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
956 tasklet_enable(&np->tx_tasklet);
957 }
958
959
960 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
961 static void init_ring(struct net_device *dev)
962 {
963 struct netdev_private *np = netdev_priv(dev);
964 int i;
965
966 np->cur_rx = np->cur_tx = 0;
967 np->dirty_rx = np->dirty_tx = 0;
968 np->cur_task = 0;
969
970 np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
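	/* The 16 bytes of slack leave room for the 14-byte Ethernet header plus
	   the 2-byte alignment offset applied when the buffers are filled below. */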
971
972 /* Initialize all Rx descriptors. */
973 for (i = 0; i < RX_RING_SIZE; i++) {
974 np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
975 ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
976 np->rx_ring[i].status = 0;
977 np->rx_ring[i].frag[0].length = 0;
978 np->rx_skbuff[i] = NULL;
979 }
980
981 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
982 for (i = 0; i < RX_RING_SIZE; i++) {
983 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
984 np->rx_skbuff[i] = skb;
985 if (skb == NULL)
986 break;
987 skb->dev = dev; /* Mark as being used by this device. */
988 skb_reserve(skb, 2); /* 16 byte align the IP header. */
989 np->rx_ring[i].frag[0].addr = cpu_to_le32(
990 pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz,
991 PCI_DMA_FROMDEVICE));
992 np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
993 }
994 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
995
996 for (i = 0; i < TX_RING_SIZE; i++) {
997 np->tx_skbuff[i] = NULL;
998 np->tx_ring[i].status = 0;
999 }
1000 return;
1001 }
1002
1003 static void tx_poll (unsigned long data)
1004 {
1005 struct net_device *dev = (struct net_device *)data;
1006 struct netdev_private *np = netdev_priv(dev);
1007 unsigned head = np->cur_task % TX_RING_SIZE;
1008 struct netdev_desc *txdesc =
1009 &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
1010
1011 /* Chain the next pointer */
1012 for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1013 int entry = np->cur_task % TX_RING_SIZE;
1014 txdesc = &np->tx_ring[entry];
1015 if (np->last_tx) {
1016 np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1017 entry*sizeof(struct netdev_desc));
1018 }
1019 np->last_tx = txdesc;
1020 }
1021 	/* Indicate the latest descriptor of the Tx ring */
1022 txdesc->status |= cpu_to_le32(DescIntrOnTx);
1023
1024 if (ioread32 (np->base + TxListPtr) == 0)
1025 iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1026 np->base + TxListPtr);
1027 return;
1028 }
1029
1030 static int
1031 start_tx (struct sk_buff *skb, struct net_device *dev)
1032 {
1033 struct netdev_private *np = netdev_priv(dev);
1034 struct netdev_desc *txdesc;
1035 unsigned entry;
1036
1037 /* Calculate the next Tx descriptor entry. */
1038 entry = np->cur_tx % TX_RING_SIZE;
1039 np->tx_skbuff[entry] = skb;
1040 txdesc = &np->tx_ring[entry];
1041
1042 txdesc->next_desc = 0;
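	/* The ring index, shifted left 2, doubles as a software frame id; the
	   Tx-completion path in intr_handler() compares it against the chip's
	   TxFrameId register. */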
1043 txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1044 txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
1045 skb->len,
1046 PCI_DMA_TODEVICE));
1047 txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1048
1049 /* Increment cur_tx before tasklet_schedule() */
1050 np->cur_tx++;
1051 mb();
1052 /* Schedule a tx_poll() task */
1053 tasklet_schedule(&np->tx_tasklet);
1054
1055 /* On some architectures: explicitly flush cache lines here. */
1056 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1
1057 && !netif_queue_stopped(dev)) {
1058 /* do nothing */
1059 } else {
1060 netif_stop_queue (dev);
1061 }
1062 dev->trans_start = jiffies;
1063 if (netif_msg_tx_queued(np)) {
1064 printk (KERN_DEBUG
1065 "%s: Transmit frame #%d queued in slot %d.\n",
1066 dev->name, np->cur_tx, entry);
1067 }
1068 return 0;
1069 }
1070
1071 /* Reset the hardware Tx path and free all Tx buffers */
1072 static int
1073 reset_tx (struct net_device *dev)
1074 {
1075 struct netdev_private *np = netdev_priv(dev);
1076 void __iomem *ioaddr = np->base;
1077 struct sk_buff *skb;
1078 int i;
1079 int irq = in_interrupt();
1080
1081 	/* Reset Tx logic; TxListPtr will be cleared */
1082 iowrite16 (TxDisable, ioaddr + MACCtrl1);
1083 sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
1084
1085 /* free all tx skbuff */
1086 for (i = 0; i < TX_RING_SIZE; i++) {
1087 np->tx_ring[i].next_desc = 0;
1088
1089 skb = np->tx_skbuff[i];
1090 if (skb) {
1091 pci_unmap_single(np->pci_dev,
1092 np->tx_ring[i].frag[0].addr, skb->len,
1093 PCI_DMA_TODEVICE);
1094 if (irq)
1095 dev_kfree_skb_irq (skb);
1096 else
1097 dev_kfree_skb (skb);
1098 np->tx_skbuff[i] = NULL;
1099 np->stats.tx_dropped++;
1100 }
1101 }
1102 np->cur_tx = np->dirty_tx = 0;
1103 np->cur_task = 0;
1104
1105 np->last_tx = NULL;
1106 iowrite8(127, ioaddr + TxDMAPollPeriod);
1107
1108 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1109 return 0;
1110 }
1111
1112 /* The interrupt handler cleans up after the Tx thread,
1113    and schedules the Rx work as a tasklet */
1114 static irqreturn_t intr_handler(int irq, void *dev_instance)
1115 {
1116 struct net_device *dev = (struct net_device *)dev_instance;
1117 struct netdev_private *np = netdev_priv(dev);
1118 void __iomem *ioaddr = np->base;
1119 int hw_frame_id;
1120 int tx_cnt;
1121 int tx_status;
1122 int handled = 0;
1123 int i;
1124
1125
1126 do {
1127 int intr_status = ioread16(ioaddr + IntrStatus);
1128 iowrite16(intr_status, ioaddr + IntrStatus);
1129
1130 if (netif_msg_intr(np))
1131 printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1132 dev->name, intr_status);
1133
1134 if (!(intr_status & DEFAULT_INTR))
1135 break;
1136
1137 handled = 1;
1138
1139 if (intr_status & (IntrRxDMADone)) {
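			/* Mask further Rx interrupts while the tasklet runs;
			   rx_poll() writes DEFAULT_INTR back to IntrEnable
			   once the ring has been drained. */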
1140 iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1141 ioaddr + IntrEnable);
1142 if (np->budget < 0)
1143 np->budget = RX_BUDGET;
1144 tasklet_schedule(&np->rx_tasklet);
1145 }
1146 if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1147 tx_status = ioread16 (ioaddr + TxStatus);
1148 for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1149 if (netif_msg_tx_done(np))
1150 printk
1151 ("%s: Transmit status is %2.2x.\n",
1152 dev->name, tx_status);
1153 if (tx_status & 0x1e) {
1154 if (netif_msg_tx_err(np))
1155 printk("%s: Transmit error status %4.4x.\n",
1156 dev->name, tx_status);
1157 np->stats.tx_errors++;
1158 if (tx_status & 0x10)
1159 np->stats.tx_fifo_errors++;
1160 if (tx_status & 0x08)
1161 np->stats.collisions++;
1162 if (tx_status & 0x04)
1163 np->stats.tx_fifo_errors++;
1164 if (tx_status & 0x02)
1165 np->stats.tx_window_errors++;
1166
1167 /*
1168 ** This reset has been verified on
1169 ** DFE-580TX boards ! phdm@macqel.be.
1170 */
1171 if (tx_status & 0x10) { /* TxUnderrun */
1172 /* Restart Tx FIFO and transmitter */
1173 sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
1174 /* No need to reset the Tx pointer here */
1175 }
1176 /* Restart the Tx. Need to make sure tx enabled */
1177 i = 10;
1178 do {
1179 iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
1180 if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
1181 break;
1182 mdelay(1);
1183 } while (--i);
1184 }
1185 /* Yup, this is a documentation bug. It cost me *hours*. */
1186 iowrite16 (0, ioaddr + TxStatus);
1187 if (tx_cnt < 0) {
1188 iowrite32(5000, ioaddr + DownCounter);
1189 break;
1190 }
1191 tx_status = ioread16 (ioaddr + TxStatus);
1192 }
1193 hw_frame_id = (tx_status >> 8) & 0xff;
1194 } else {
1195 hw_frame_id = ioread8(ioaddr + TxFrameId);
1196 }
1197
1198 if (np->pci_rev_id >= 0x14) {
1199 spin_lock(&np->lock);
1200 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1201 int entry = np->dirty_tx % TX_RING_SIZE;
1202 struct sk_buff *skb;
1203 int sw_frame_id;
1204 sw_frame_id = (le32_to_cpu(
1205 np->tx_ring[entry].status) >> 2) & 0xff;
1206 if (sw_frame_id == hw_frame_id &&
1207 !(le32_to_cpu(np->tx_ring[entry].status)
1208 & 0x00010000))
1209 break;
1210 if (sw_frame_id == (hw_frame_id + 1) %
1211 TX_RING_SIZE)
1212 break;
1213 skb = np->tx_skbuff[entry];
1214 /* Free the original skb. */
1215 pci_unmap_single(np->pci_dev,
1216 np->tx_ring[entry].frag[0].addr,
1217 skb->len, PCI_DMA_TODEVICE);
1218 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1219 np->tx_skbuff[entry] = NULL;
1220 np->tx_ring[entry].frag[0].addr = 0;
1221 np->tx_ring[entry].frag[0].length = 0;
1222 }
1223 spin_unlock(&np->lock);
1224 } else {
1225 spin_lock(&np->lock);
1226 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1227 int entry = np->dirty_tx % TX_RING_SIZE;
1228 struct sk_buff *skb;
1229 if (!(le32_to_cpu(np->tx_ring[entry].status)
1230 & 0x00010000))
1231 break;
1232 skb = np->tx_skbuff[entry];
1233 /* Free the original skb. */
1234 pci_unmap_single(np->pci_dev,
1235 np->tx_ring[entry].frag[0].addr,
1236 skb->len, PCI_DMA_TODEVICE);
1237 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1238 np->tx_skbuff[entry] = NULL;
1239 np->tx_ring[entry].frag[0].addr = 0;
1240 np->tx_ring[entry].frag[0].length = 0;
1241 }
1242 spin_unlock(&np->lock);
1243 }
1244
1245 if (netif_queue_stopped(dev) &&
1246 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1247 /* The ring is no longer full, clear busy flag. */
1248 netif_wake_queue (dev);
1249 }
1250 /* Abnormal error summary/uncommon events handlers. */
1251 if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1252 netdev_error(dev, intr_status);
1253 } while (0);
1254 if (netif_msg_intr(np))
1255 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1256 dev->name, ioread16(ioaddr + IntrStatus));
1257 return IRQ_RETVAL(handled);
1258 }
1259
1260 static void rx_poll(unsigned long data)
1261 {
1262 struct net_device *dev = (struct net_device *)data;
1263 struct netdev_private *np = netdev_priv(dev);
1264 int entry = np->cur_rx % RX_RING_SIZE;
1265 int boguscnt = np->budget;
1266 void __iomem *ioaddr = np->base;
1267 int received = 0;
1268
1269 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1270 while (1) {
1271 struct netdev_desc *desc = &(np->rx_ring[entry]);
1272 u32 frame_status = le32_to_cpu(desc->status);
1273 int pkt_len;
1274
1275 if (--boguscnt < 0) {
1276 goto not_done;
1277 }
1278 if (!(frame_status & DescOwn))
1279 break;
1280 pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */
1281 if (netif_msg_rx_status(np))
1282 printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
1283 frame_status);
1284 if (frame_status & 0x001f4000) {
1285 			/* There was an error. */
1286 if (netif_msg_rx_err(np))
1287 printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
1288 frame_status);
1289 np->stats.rx_errors++;
1290 if (frame_status & 0x00100000) np->stats.rx_length_errors++;
1291 if (frame_status & 0x00010000) np->stats.rx_fifo_errors++;
1292 if (frame_status & 0x00060000) np->stats.rx_frame_errors++;
1293 if (frame_status & 0x00080000) np->stats.rx_crc_errors++;
1294 if (frame_status & 0x00100000) {
1295 printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1296 " status %8.8x.\n",
1297 dev->name, frame_status);
1298 }
1299 } else {
1300 struct sk_buff *skb;
1301 #ifndef final_version
1302 if (netif_msg_rx_status(np))
1303 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
1304 ", bogus_cnt %d.\n",
1305 pkt_len, boguscnt);
1306 #endif
1307 /* Check if the packet is long enough to accept without copying
1308 to a minimally-sized skbuff. */
1309 if (pkt_len < rx_copybreak
1310 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1311 skb_reserve(skb, 2); /* 16 byte align the IP header */
1312 pci_dma_sync_single_for_cpu(np->pci_dev,
1313 desc->frag[0].addr,
1314 np->rx_buf_sz,
1315 PCI_DMA_FROMDEVICE);
1316
1317 eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
1318 pci_dma_sync_single_for_device(np->pci_dev,
1319 desc->frag[0].addr,
1320 np->rx_buf_sz,
1321 PCI_DMA_FROMDEVICE);
1322 skb_put(skb, pkt_len);
1323 } else {
1324 pci_unmap_single(np->pci_dev,
1325 desc->frag[0].addr,
1326 np->rx_buf_sz,
1327 PCI_DMA_FROMDEVICE);
1328 skb_put(skb = np->rx_skbuff[entry], pkt_len);
1329 np->rx_skbuff[entry] = NULL;
1330 }
1331 skb->protocol = eth_type_trans(skb, dev);
1332 /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1333 netif_rx(skb);
1334 dev->last_rx = jiffies;
1335 }
1336 entry = (entry + 1) % RX_RING_SIZE;
1337 received++;
1338 }
1339 np->cur_rx = entry;
1340 refill_rx (dev);
1341 np->budget -= received;
1342 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1343 return;
1344
1345 not_done:
1346 np->cur_rx = entry;
1347 refill_rx (dev);
1348 if (!received)
1349 received = 1;
1350 np->budget -= received;
1351 if (np->budget <= 0)
1352 np->budget = RX_BUDGET;
1353 tasklet_schedule(&np->rx_tasklet);
1354 return;
1355 }
1356
1357 static void refill_rx (struct net_device *dev)
1358 {
1359 struct netdev_private *np = netdev_priv(dev);
1360 int entry;
1361 int cnt = 0;
1362
1363 /* Refill the Rx ring buffers. */
1364 for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1365 np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1366 struct sk_buff *skb;
1367 entry = np->dirty_rx % RX_RING_SIZE;
1368 if (np->rx_skbuff[entry] == NULL) {
1369 skb = dev_alloc_skb(np->rx_buf_sz);
1370 np->rx_skbuff[entry] = skb;
1371 if (skb == NULL)
1372 break; /* Better luck next round. */
1373 skb->dev = dev; /* Mark as being used by this device. */
1374 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1375 np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1376 pci_map_single(np->pci_dev, skb->data,
1377 np->rx_buf_sz, PCI_DMA_FROMDEVICE));
1378 }
1379 /* Perhaps we need not reset this field. */
1380 np->rx_ring[entry].frag[0].length =
1381 cpu_to_le32(np->rx_buf_sz | LastFrag);
1382 np->rx_ring[entry].status = 0;
1383 cnt++;
1384 }
1385 return;
1386 }
1387 static void netdev_error(struct net_device *dev, int intr_status)
1388 {
1389 struct netdev_private *np = netdev_priv(dev);
1390 void __iomem *ioaddr = np->base;
1391 u16 mii_ctl, mii_advertise, mii_lpa;
1392 int speed;
1393
1394 if (intr_status & LinkChange) {
1395 if (np->an_enable) {
1396 mii_advertise = mdio_read (dev, np->phys[0], MII_ADVERTISE);
1397 mii_lpa= mdio_read (dev, np->phys[0], MII_LPA);
1398 mii_advertise &= mii_lpa;
1399 printk (KERN_INFO "%s: Link changed: ", dev->name);
1400 if (mii_advertise & ADVERTISE_100FULL) {
1401 np->speed = 100;
1402 printk ("100Mbps, full duplex\n");
1403 } else if (mii_advertise & ADVERTISE_100HALF) {
1404 np->speed = 100;
1405 printk ("100Mbps, half duplex\n");
1406 } else if (mii_advertise & ADVERTISE_10FULL) {
1407 np->speed = 10;
1408 printk ("10Mbps, full duplex\n");
1409 } else if (mii_advertise & ADVERTISE_10HALF) {
1410 np->speed = 10;
1411 printk ("10Mbps, half duplex\n");
1412 } else
1413 printk ("\n");
1414
1415 } else {
1416 mii_ctl = mdio_read (dev, np->phys[0], MII_BMCR);
1417 speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1418 np->speed = speed;
1419 printk (KERN_INFO "%s: Link changed: %dMbps ,",
1420 dev->name, speed);
1421 printk ("%s duplex.\n", (mii_ctl & BMCR_FULLDPLX) ?
1422 "full" : "half");
1423 }
1424 check_duplex (dev);
1425 if (np->flowctrl && np->mii_if.full_duplex) {
1426 iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1427 ioaddr + MulticastFilter1+2);
1428 iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1429 ioaddr + MACCtrl0);
1430 }
1431 }
1432 if (intr_status & StatsMax) {
1433 get_stats(dev);
1434 }
1435 if (intr_status & IntrPCIErr) {
1436 printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1437 dev->name, intr_status);
1438 /* We must do a global reset of DMA to continue. */
1439 }
1440 }
1441
1442 static struct net_device_stats *get_stats(struct net_device *dev)
1443 {
1444 struct netdev_private *np = netdev_priv(dev);
1445 void __iomem *ioaddr = np->base;
1446 int i;
1447
1448 /* We should lock this segment of code for SMP eventually, although
1449 the vulnerability window is very small and statistics are
1450 non-critical. */
1451 	/* The chip only needs to report frames it silently dropped. */
1452 np->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
1453 np->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1454 np->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1455 np->stats.collisions += ioread8(ioaddr + StatsLateColl);
1456 np->stats.collisions += ioread8(ioaddr + StatsMultiColl);
1457 np->stats.collisions += ioread8(ioaddr + StatsOneColl);
1458 np->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
1459 ioread8(ioaddr + StatsTxDefer);
1460 for (i = StatsTxDefer; i <= StatsMcastRx; i++)
1461 ioread8(ioaddr + i);
1462 np->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1463 np->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1464 np->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1465 np->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1466
1467 return &np->stats;
1468 }
1469
1470 static void set_rx_mode(struct net_device *dev)
1471 {
1472 struct netdev_private *np = netdev_priv(dev);
1473 void __iomem *ioaddr = np->base;
1474 u16 mc_filter[4]; /* Multicast hash filter */
1475 u32 rx_mode;
1476 int i;
1477
1478 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1479 memset(mc_filter, 0xff, sizeof(mc_filter));
1480 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1481 } else if ((dev->mc_count > multicast_filter_limit)
1482 || (dev->flags & IFF_ALLMULTI)) {
1483 /* Too many to match, or accept all multicasts. */
1484 memset(mc_filter, 0xff, sizeof(mc_filter));
1485 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1486 } else if (dev->mc_count) {
1487 struct dev_mc_list *mclist;
1488 int bit;
1489 int index;
1490 int crc;
1491 memset (mc_filter, 0, sizeof (mc_filter));
1492 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1493 i++, mclist = mclist->next) {
1494 crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
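			/* The filter index is the bit-reversed top six bits of the
			   little-endian CRC, selecting one of the 64 hash-table bits. */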
1495 for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1496 if (crc & 0x80000000) index |= 1 << bit;
1497 mc_filter[index/16] |= (1 << (index % 16));
1498 }
1499 rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1500 } else {
1501 iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1502 return;
1503 }
1504 if (np->mii_if.full_duplex && np->flowctrl)
1505 mc_filter[3] |= 0x0200;
1506
1507 for (i = 0; i < 4; i++)
1508 iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1509 iowrite8(rx_mode, ioaddr + RxMode);
1510 }
1511
1512 static int __set_mac_addr(struct net_device *dev)
1513 {
1514 struct netdev_private *np = netdev_priv(dev);
1515 u16 addr16;
1516
1517 addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1518 iowrite16(addr16, np->base + StationAddr);
1519 addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1520 iowrite16(addr16, np->base + StationAddr+2);
1521 addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1522 iowrite16(addr16, np->base + StationAddr+4);
1523 return 0;
1524 }
1525
1526 static int check_if_running(struct net_device *dev)
1527 {
1528 if (!netif_running(dev))
1529 return -EINVAL;
1530 return 0;
1531 }
1532
1533 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1534 {
1535 struct netdev_private *np = netdev_priv(dev);
1536 strcpy(info->driver, DRV_NAME);
1537 strcpy(info->version, DRV_VERSION);
1538 strcpy(info->bus_info, pci_name(np->pci_dev));
1539 }
1540
1541 static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1542 {
1543 struct netdev_private *np = netdev_priv(dev);
1544 spin_lock_irq(&np->lock);
1545 mii_ethtool_gset(&np->mii_if, ecmd);
1546 spin_unlock_irq(&np->lock);
1547 return 0;
1548 }
1549
1550 static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1551 {
1552 struct netdev_private *np = netdev_priv(dev);
1553 int res;
1554 spin_lock_irq(&np->lock);
1555 res = mii_ethtool_sset(&np->mii_if, ecmd);
1556 spin_unlock_irq(&np->lock);
1557 return res;
1558 }
1559
1560 static int nway_reset(struct net_device *dev)
1561 {
1562 struct netdev_private *np = netdev_priv(dev);
1563 return mii_nway_restart(&np->mii_if);
1564 }
1565
1566 static u32 get_link(struct net_device *dev)
1567 {
1568 struct netdev_private *np = netdev_priv(dev);
1569 return mii_link_ok(&np->mii_if);
1570 }
1571
1572 static u32 get_msglevel(struct net_device *dev)
1573 {
1574 struct netdev_private *np = netdev_priv(dev);
1575 return np->msg_enable;
1576 }
1577
1578 static void set_msglevel(struct net_device *dev, u32 val)
1579 {
1580 struct netdev_private *np = netdev_priv(dev);
1581 np->msg_enable = val;
1582 }
1583
1584 static const struct ethtool_ops ethtool_ops = {
1585 .begin = check_if_running,
1586 .get_drvinfo = get_drvinfo,
1587 .get_settings = get_settings,
1588 .set_settings = set_settings,
1589 .nway_reset = nway_reset,
1590 .get_link = get_link,
1591 .get_msglevel = get_msglevel,
1592 .set_msglevel = set_msglevel,
1593 .get_perm_addr = ethtool_op_get_perm_addr,
1594 };
1595
1596 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1597 {
1598 struct netdev_private *np = netdev_priv(dev);
1599 void __iomem *ioaddr = np->base;
1600 int rc;
1601 int i;
1602
1603 if (!netif_running(dev))
1604 return -EINVAL;
1605
1606 spin_lock_irq(&np->lock);
1607 rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1608 spin_unlock_irq(&np->lock);
1609 switch (cmd) {
1610 case SIOCDEVPRIVATE:
1611 for (i=0; i<TX_RING_SIZE; i++) {
1612 printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
1613 (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
1614 le32_to_cpu(np->tx_ring[i].next_desc),
1615 le32_to_cpu(np->tx_ring[i].status),
1616 (le32_to_cpu(np->tx_ring[i].status) >> 2)
1617 & 0xff,
1618 le32_to_cpu(np->tx_ring[i].frag[0].addr),
1619 le32_to_cpu(np->tx_ring[i].frag[0].length));
1620 }
1621 printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
1622 ioread32(np->base + TxListPtr),
1623 netif_queue_stopped(dev));
1624 printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
1625 np->cur_tx, np->cur_tx % TX_RING_SIZE,
1626 np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
1627 printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
1628 printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
1629 printk(KERN_DEBUG "TxStatus=%04x\n", ioread16(ioaddr + TxStatus));
1630 return 0;
1631 }
1632
1633
1634 return rc;
1635 }
1636
1637 static int netdev_close(struct net_device *dev)
1638 {
1639 struct netdev_private *np = netdev_priv(dev);
1640 void __iomem *ioaddr = np->base;
1641 struct sk_buff *skb;
1642 int i;
1643
1644 /* Wait and kill tasklet */
1645 tasklet_kill(&np->rx_tasklet);
1646 tasklet_kill(&np->tx_tasklet);
1647 np->cur_tx = 0;
1648 np->dirty_tx = 0;
1649 np->cur_task = 0;
1650 np->last_tx = NULL;
1651
1652 netif_stop_queue(dev);
1653
1654 if (netif_msg_ifdown(np)) {
1655 printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1656 "Rx %4.4x Int %2.2x.\n",
1657 dev->name, ioread8(ioaddr + TxStatus),
1658 ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1659 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1660 dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1661 }
1662
1663 /* Disable interrupts by clearing the interrupt mask. */
1664 iowrite16(0x0000, ioaddr + IntrEnable);
1665
1666 	/* Disable Rx and Tx DMA so resources can be released safely */
1667 iowrite32(0x500, ioaddr + DMACtrl);
1668
1669 /* Stop the chip's Tx and Rx processes. */
1670 iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1671
1672 for (i = 2000; i > 0; i--) {
1673 if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
1674 break;
1675 mdelay(1);
1676 }
1677
1678 iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
1679 ioaddr +ASICCtrl + 2);
1680
1681 for (i = 2000; i > 0; i--) {
1682 if ((ioread16(ioaddr + ASICCtrl +2) & ResetBusy) == 0)
1683 break;
1684 mdelay(1);
1685 }
1686
1687 #ifdef __i386__
1688 if (netif_msg_hw(np)) {
1689 printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
1690 (int)(np->tx_ring_dma));
1691 for (i = 0; i < TX_RING_SIZE; i++)
1692 printk(" #%d desc. %4.4x %8.8x %8.8x.\n",
1693 i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1694 np->tx_ring[i].frag[0].length);
1695 printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
1696 (int)(np->rx_ring_dma));
1697 for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
1698 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1699 i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1700 np->rx_ring[i].frag[0].length);
1701 }
1702 }
1703 #endif /* __i386__ debugging only */
1704
1705 free_irq(dev->irq, dev);
1706
1707 del_timer_sync(&np->timer);
1708
1709 /* Free all the skbuffs in the Rx queue. */
1710 for (i = 0; i < RX_RING_SIZE; i++) {
1711 np->rx_ring[i].status = 0;
1712 np->rx_ring[i].frag[0].addr = 0xBADF00D0; /* An invalid address. */
1713 skb = np->rx_skbuff[i];
1714 if (skb) {
1715 pci_unmap_single(np->pci_dev,
1716 np->rx_ring[i].frag[0].addr, np->rx_buf_sz,
1717 PCI_DMA_FROMDEVICE);
1718 dev_kfree_skb(skb);
1719 np->rx_skbuff[i] = NULL;
1720 }
1721 }
1722 for (i = 0; i < TX_RING_SIZE; i++) {
1723 np->tx_ring[i].next_desc = 0;
1724 skb = np->tx_skbuff[i];
1725 if (skb) {
1726 pci_unmap_single(np->pci_dev,
1727 np->tx_ring[i].frag[0].addr, skb->len,
1728 PCI_DMA_TODEVICE);
1729 dev_kfree_skb(skb);
1730 np->tx_skbuff[i] = NULL;
1731 }
1732 }
1733
1734 return 0;
1735 }
1736
1737 static void __devexit sundance_remove1 (struct pci_dev *pdev)
1738 {
1739 struct net_device *dev = pci_get_drvdata(pdev);
1740
1741 if (dev) {
1742 struct netdev_private *np = netdev_priv(dev);
1743
1744 unregister_netdev(dev);
1745 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
1746 np->rx_ring_dma);
1747 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
1748 np->tx_ring_dma);
1749 pci_iounmap(pdev, np->base);
1750 pci_release_regions(pdev);
1751 free_netdev(dev);
1752 pci_set_drvdata(pdev, NULL);
1753 }
1754 }
1755
1756 static struct pci_driver sundance_driver = {
1757 .name = DRV_NAME,
1758 .id_table = sundance_pci_tbl,
1759 .probe = sundance_probe1,
1760 .remove = __devexit_p(sundance_remove1),
1761 };
1762
1763 static int __init sundance_init(void)
1764 {
1765 /* when a module, this is printed whether or not devices are found in probe */
1766 #ifdef MODULE
1767 printk(version);
1768 #endif
1769 return pci_register_driver(&sundance_driver);
1770 }
1771
1772 static void __exit sundance_exit(void)
1773 {
1774 pci_unregister_driver(&sundance_driver);
1775 }
1776
1777 module_init(sundance_init);
1778 module_exit(sundance_exit);
1779
1780