/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
/*
	Written 1999-2000 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support and updates available at
	http://www.scyld.com/network/sundance.html
	[link no longer provides useful info -jgarzik]

*/

#define DRV_NAME	"sundance"
#define DRV_VERSION	"1.1"
#define DRV_RELDATE	"27-Jun-2006"


/* The user-configurable values.
   These may be modified when a driver module is loaded.*/
static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   Typical is a 64 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature.
   This chip can receive into offset buffers, so the Alpha does not
   need a copy-align. */
static int rx_copybreak;
static int flowctrl = 1;

/* media[] specifies the media type the NIC operates at.
		 autosense	Autosensing active media.
		 10mbps_hd	10Mbps half duplex.
		 10mbps_fd	10Mbps full duplex.
		 100mbps_hd	100Mbps half duplex.
		 100mbps_fd	100Mbps full duplex.
		 0		Autosensing active media.
		 1		10Mbps half duplex.
		 2		10Mbps full duplex.
		 3		100Mbps half duplex.
		 4		100Mbps full duplex.
*/
#define MAX_UNITS 8
static char *media[MAX_UNITS];


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority, and more than 128 requires modifying the
   Tx error recovery.
   Large receive rings merely waste memory. */
#define TX_RING_SIZE	32
#define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used. */
#define RX_RING_SIZE	64
#define RX_BUDGET	32
#define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct netdev_desc)
#define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct netdev_desc)
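
/* Illustration (hypothetical, not part of the driver): because the ring
 * sizes above are powers of two, the compiler can lower the modulo used
 * for ring indexing to a simple mask, e.g.
 *
 *	entry = np->cur_tx % TX_RING_SIZE;	// emitted as: np->cur_tx & (TX_RING_SIZE - 1)
 *
 * since cur_tx is unsigned. */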

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(4*HZ)
#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer.*/

/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#ifndef _COMPAT_WITH_OLD_KERNEL
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#else
#include "crc32.h"
#include "ethtool.h"
#include "mii.h"
#include "compat.h"
#endif

/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n"
KERN_INFO "  http://www.scyld.com/network/sundance.html\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(media, charp, NULL, 0);
module_param(flowctrl, int, 0);
MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
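
/* Example (hypothetical invocation, for illustration only):
 *
 *	modprobe sundance debug=3 flowctrl=1 media=100mbps_fd,autosense
 *
 * media= takes up to MAX_UNITS comma-separated entries, applied to the
 * cards in probe order; see the media[] table above for valid values. */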

/*
				Theory of Operation

I. Board Compatibility

This driver is designed for the Sundance Technologies "Alta" ST201 chip.

II. Board-specific settings

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
Some chips explicitly use only 2^N sized rings, while others use a
'next descriptor' pointer that the driver forms into rings.

IIIb/c. Transmit/Receive Structure

This driver uses a zero-copy receive and transmit scheme.
The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack.  Buffers consumed this way are replaced by newly allocated
skbuffs in a later phase of receives.

The RX_COPYBREAK value is chosen to trade-off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames.  New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets.  When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine.  Copying also preloads the cache, which is
most useful with small frames.
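
For example, loading the module with rx_copybreak=256 makes rx_poll()
copy any frame shorter than 256 bytes into a freshly allocated skbuff
and leave the original buffer in the ring; longer frames are still
handed up zero-copy.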

A subtle aspect of the operation is that the IP header at offset 14 in an
ethernet frame isn't longword aligned for further processing.
Unaligned buffers are permitted by the Sundance hardware, so
frames are received into the skbuff at an offset of "+2", 16-byte aligning
the IP header.
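
Concretely, init_ring() and refill_rx() below do

	skb_reserve(skb, 2);	// 2 + 14-byte Ethernet header => IP header at offset 16

before handing skb->data to the chip.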

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag.  It sets the tbusy flag whenever it's queuing a Tx packet.  If the next
queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
the 'lp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark.  Iff the 'lp->tx_full' flag is set, it
clears both the tx_full and tbusy flags.

IV. Notes

IVb. References

The Sundance ST201 datasheet, preliminary version.
The Kendin KS8723 datasheet, preliminary version.
The ICplus IP100 datasheet, preliminary version.
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html

IVc. Errata

*/

/* Work-around for Kendin chip bugs. */
#ifndef CONFIG_SUNDANCE_MMIO
#define USE_IO_OPS 1
#endif

static const struct pci_device_id sundance_pci_tbl[] = {
	{ 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
	{ 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
	{ 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
	{ 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
	{ 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
	{ 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
	{ 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
	{ }
};
MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);

enum {
	netdev_io_size = 128
};

struct pci_id_info {
	const char *name;
};
static const struct pci_id_info pci_id_tbl[] __devinitdata = {
	{"D-Link DFE-550TX FAST Ethernet Adapter"},
	{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
	{"D-Link DFE-580TX 4 port Server Adapter"},
	{"D-Link DFE-530TXS FAST Ethernet Adapter"},
	{"D-Link DL10050-based FAST Ethernet Adapter"},
	{"Sundance Technology Alta"},
	{"IC Plus Corporation IP100A FAST Ethernet Adapter"},
	{ }	/* terminate list. */
};

/* This driver was written to use PCI memory space, however x86-oriented
   hardware often uses I/O space accesses. */

/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device.  The name can only partially document the semantics and make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
*/
enum alta_offsets {
	DMACtrl = 0x00,
	TxListPtr = 0x04,
	TxDMABurstThresh = 0x08,
	TxDMAUrgentThresh = 0x09,
	TxDMAPollPeriod = 0x0a,
	RxDMAStatus = 0x0c,
	RxListPtr = 0x10,
	DebugCtrl0 = 0x1a,
	DebugCtrl1 = 0x1c,
	RxDMABurstThresh = 0x14,
	RxDMAUrgentThresh = 0x15,
	RxDMAPollPeriod = 0x16,
	LEDCtrl = 0x1a,
	ASICCtrl = 0x30,
	EEData = 0x34,
	EECtrl = 0x36,
	TxStartThresh = 0x3c,
	RxEarlyThresh = 0x3e,
	FlashAddr = 0x40,
	FlashData = 0x44,
	TxStatus = 0x46,
	TxFrameId = 0x47,
	DownCounter = 0x18,
	IntrClear = 0x4a,
	IntrEnable = 0x4c,
	IntrStatus = 0x4e,
	MACCtrl0 = 0x50,
	MACCtrl1 = 0x52,
	StationAddr = 0x54,
	MaxFrameSize = 0x5A,
	RxMode = 0x5c,
	MIICtrl = 0x5e,
	MulticastFilter0 = 0x60,
	MulticastFilter1 = 0x64,
	RxOctetsLow = 0x68,
	RxOctetsHigh = 0x6a,
	TxOctetsLow = 0x6c,
	TxOctetsHigh = 0x6e,
	TxFramesOK = 0x70,
	RxFramesOK = 0x72,
	StatsCarrierError = 0x74,
	StatsLateColl = 0x75,
	StatsMultiColl = 0x76,
	StatsOneColl = 0x77,
	StatsTxDefer = 0x78,
	RxMissed = 0x79,
	StatsTxXSDefer = 0x7a,
	StatsTxAbort = 0x7b,
	StatsBcastTx = 0x7c,
	StatsBcastRx = 0x7d,
	StatsMcastTx = 0x7e,
	StatsMcastRx = 0x7f,
	/* Aliased and bogus values! */
	RxStatus = 0x0c,
};
enum ASICCtrl_HiWord_bit {
	GlobalReset = 0x0001,
	RxReset = 0x0002,
	TxReset = 0x0004,
	DMAReset = 0x0008,
	FIFOReset = 0x0010,
	NetworkReset = 0x0020,
	HostReset = 0x0040,
	ResetBusy = 0x0400,
};
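
/* These bits live in the high word of the 32-bit ASICCtrl register:
 * callers either shift them left by 16 for a 32-bit write (see
 * sundance_reset()) or write them as a 16-bit value to ASICCtrl + 2
 * (see reset_tx()). */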

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
	IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
	IntrDrvRqst=0x0040,
	StatsMax=0x0080, LinkChange=0x0100,
	IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
};

/* Bits in the RxMode register. */
enum rx_mode_bits {
	AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
	AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
};
/* Bits in MACCtrl. */
enum mac_ctrl0_bits {
	EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
	EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
};
enum mac_ctrl1_bits {
	StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
	TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
	RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
};

/* The Rx and Tx buffer descriptors. */
/* Note that using only 32 bit fields simplifies conversion to big-endian
   architectures. */
struct netdev_desc {
	u32 next_desc;
	u32 status;
	struct desc_frag { u32 addr, length; } frag[1];
};

/* Bits in netdev_desc.status */
enum desc_status_bits {
	DescOwn=0x8000,
	DescEndPacket=0x4000,
	DescEndRing=0x2000,
	LastFrag=0x80000000,
	DescIntrOnTx=0x8000,
	DescIntrOnDMADone=0x80000000,
	DisableAlign = 0x00000001,
};
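
/* The aliases above are deliberate: bit 15 serves as DescOwn on Rx
 * descriptors but as DescIntrOnTx on Tx descriptors, and bit 31 doubles
 * as LastFrag in frag[].length and DescIntrOnDMADone in status. */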

#define PRIV_ALIGN	15	/* Required alignment mask */
/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
   within the structure. */
#define MII_CNT		4
struct netdev_private {
	/* Descriptor rings first for alignment. */
	struct netdev_desc *rx_ring;
	struct netdev_desc *tx_ring;
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_ring_dma;
	dma_addr_t rx_ring_dma;
	struct net_device_stats stats;
	struct timer_list timer;	/* Media monitoring timer. */
	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;
	spinlock_t rx_lock;		/* Group with Tx control cache line. */
	int msg_enable;
	int chip_id;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	struct netdev_desc *last_tx;	/* Last Tx descriptor used. */
	unsigned int cur_tx, dirty_tx;
	/* These values keep track of the transceiver/media in use. */
	unsigned int flowctrl:1;
	unsigned int default_port:4;	/* Last dev->if_port value. */
	unsigned int an_enable:1;
	unsigned int speed;
	struct tasklet_struct rx_tasklet;
	struct tasklet_struct tx_tasklet;
	int budget;
	int cur_task;
	/* Multicast and receive mode. */
	spinlock_t mcastlock;		/* SMP lock multicast updates. */
	u16 mcast_filter[4];
	/* MII transceiver section. */
	struct mii_if_info mii_if;
	int mii_preamble_required;
	unsigned char phys[MII_CNT];	/* MII device addresses, only first one used. */
	struct pci_dev *pci_dev;
	void __iomem *base;
	unsigned char pci_rev_id;
};

/* The station address location in the EEPROM. */
#define EEPROM_SA_OFFSET	0x10
#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
			IntrDrvRqst | IntrTxDone | StatsMax | \
			LinkChange)
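
/* The Rx sources are masked out of this set while the Rx tasklet runs
 * (see intr_handler()) and restored at the end of rx_poll() once the
 * budgeted work is done, so receive processing is paced by the tasklet
 * rather than by the raw interrupt rate. */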

static int  change_mtu(struct net_device *dev, int new_mtu);
static int  eeprom_read(void __iomem *ioaddr, int location);
static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static int  start_tx(struct sk_buff *skb, struct net_device *dev);
static int  reset_tx (struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
static void rx_poll(unsigned long data);
static void tx_poll(unsigned long data);
static void refill_rx (struct net_device *dev);
static void netdev_error(struct net_device *dev, int intr_status);
static void set_rx_mode(struct net_device *dev);
static int __set_mac_addr(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int  netdev_close(struct net_device *dev);
static struct ethtool_ops ethtool_ops;

static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base + ASICCtrl;
	int countdown;

	/* ST201 documentation states ASICCtrl is a 32 bit register */
	iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
	/* ST201 documentation states reset can take up to 1 ms */
	countdown = 10 + 1;
	while (ioread32 (ioaddr) & (ResetBusy << 16)) {
		if (--countdown == 0) {
			printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
			break;
		}
		udelay(100);
	}
}

static int __devinit sundance_probe1 (struct pci_dev *pdev,
				      const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int card_idx;
	int chip_idx = ent->driver_data;
	int irq;
	int i;
	void __iomem *ioaddr;
	u16 mii_ctl;
	void *ring_space;
	dma_addr_t ring_dma;
#ifdef USE_IO_OPS
	int bar = 0;
#else
	int bar = 1;
#endif
	int phy, phy_idx = 0;


	/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	if (pci_enable_device(pdev))
		return -EIO;
	pci_set_master(pdev);

	irq = pdev->irq;

	dev = alloc_etherdev(sizeof(*np));
	if (!dev)
		return -ENOMEM;
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_request_regions(pdev, DRV_NAME))
		goto err_out_netdev;

	ioaddr = pci_iomap(pdev, bar, netdev_io_size);
	if (!ioaddr)
		goto err_out_res;

	for (i = 0; i < 3; i++)
		((u16 *)dev->dev_addr)[i] =
			le16_to_cpu(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	dev->base_addr = (unsigned long)ioaddr;
	dev->irq = irq;

	np = netdev_priv(dev);
	np->base = ioaddr;
	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->msg_enable = (1 << debug) - 1;
	spin_lock_init(&np->lock);
	tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
	tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);

	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_cleardev;
	np->tx_ring = (struct netdev_desc *)ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	np->rx_ring = (struct netdev_desc *)ring_space;
	np->rx_ring_dma = ring_dma;

	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->mii_if.phy_id_mask = 0x1f;
	np->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->open = &netdev_open;
	dev->hard_start_xmit = &start_tx;
	dev->stop = &netdev_close;
	dev->get_stats = &get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &netdev_ioctl;
	SET_ETHTOOL_OPS(dev, &ethtool_ops);
	dev->tx_timeout = &tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->change_mtu = &change_mtu;
	pci_set_drvdata(pdev, dev);

	pci_read_config_byte(pdev, PCI_REVISION_ID, &np->pci_rev_id);

	i = register_netdev(dev);
	if (i)
		goto err_out_unmap_rx;

	printk(KERN_INFO "%s: %s at %p, ",
		   dev->name, pci_id_tbl[chip_idx].name, ioaddr);
	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);

	np->phys[0] = 1;		/* Default setting */
	np->mii_preamble_required++;
	/*
	 * Some PHYs seem not to deal well with address 0 being accessed
	 * first, so probe address 0 last: phy runs 1..32 and is masked
	 * with 0x1f, mapping 32 to address 0.
	 */
	for (phy = 1; phy <= 32 && phy_idx < MII_CNT; phy++) {
		int phyx = phy & 0x1f;
		int mii_status = mdio_read(dev, phyx, MII_BMSR);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			np->phys[phy_idx++] = phyx;
			np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
			if ((mii_status & 0x0040) == 0)
				np->mii_preamble_required++;
			printk(KERN_INFO "%s: MII PHY found at address %d, status "
				   "0x%4.4x advertising %4.4x.\n",
				   dev->name, phyx, mii_status, np->mii_if.advertising);
		}
	}
	np->mii_preamble_required--;

	if (phy_idx == 0) {
		printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
			   dev->name, ioread32(ioaddr + ASICCtrl));
		goto err_out_unregister;
	}

	np->mii_if.phy_id = np->phys[0];

	/* Parse override configuration */
	np->an_enable = 1;
	if (card_idx < MAX_UNITS) {
		if (media[card_idx] != NULL) {
			np->an_enable = 0;
			if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
			    strcmp (media[card_idx], "4") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 1;
			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
				   strcmp (media[card_idx], "3") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 0;
			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
				   strcmp (media[card_idx], "2") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 1;
			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
				   strcmp (media[card_idx], "1") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 0;
			} else {
				np->an_enable = 1;
			}
		}
		if (flowctrl == 1)
			np->flowctrl = 1;
	}

	/* Fibre PHY? */
	if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
		/* Default 100Mbps Full */
		if (np->an_enable) {
			np->speed = 100;
			np->mii_if.full_duplex = 1;
			np->an_enable = 0;
		}
	}
	/* Reset PHY */
	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
	mdelay (300);
	/* If flow control enabled, we need to advertise it.*/
	if (np->flowctrl)
		mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
	/* Force media type */
	if (!np->an_enable) {
		mii_ctl = 0;
		mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
		mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
		mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
		printk (KERN_INFO "Override speed=%d, %s duplex\n",
			np->speed, np->mii_if.full_duplex ? "Full" : "Half");

	}

	/* Perhaps move the reset here? */
	/* Reset the chip to erase previous misconfiguration. */
	if (netif_msg_hw(np))
		printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
	iowrite16(0x00ff, ioaddr + ASICCtrl + 2);
	if (netif_msg_hw(np))
		printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));

	card_idx++;
	return 0;

err_out_unregister:
	unregister_netdev(dev);
err_out_unmap_rx:
	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_cleardev:
	pci_set_drvdata(pdev, NULL);
	pci_iounmap(pdev, ioaddr);
err_out_res:
	pci_release_regions(pdev);
err_out_netdev:
	free_netdev (dev);
	return -ENODEV;
}

static int change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
		return -EINVAL;
	if (netif_running(dev))
		return -EBUSY;
	dev->mtu = new_mtu;
	return 0;
}

#define eeprom_delay(ee_addr)	ioread32(ee_addr)
/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
static int __devinit eeprom_read(void __iomem *ioaddr, int location)
{
	int boguscnt = 10000;		/* Typical 1900 ticks. */
	iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
	do {
		eeprom_delay(ioaddr + EECtrl);
		if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
			return ioread16(ioaddr + EEData);
		}
	} while (--boguscnt > 0);
	return 0;
}

/* MII transceiver control section.
   Read and write the MII registers using software-generated serial
   MDIO protocol.  See the MII specifications or DP83840A data sheet
   for details.

   The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
   met by back-to-back 33 MHz PCI cycles. */
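
/* For reference, the read frame clocked out by mdio_read() below follows
 * the standard IEEE 802.3 clause-22 layout: a preamble of 1s, a 01 start
 * delimiter, a 10 read opcode, a 5-bit PHY address, a 5-bit register
 * address, then a turnaround and 16 data bits.  The (0xf6 << 10) term in
 * mii_cmd packs two trailing preamble 1s plus the 01/10 start and opcode
 * fields ahead of the address bits. */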
#define mdio_delay() ioread8(mdio_addr)

enum mii_reg_bits {
	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
};
#define MDIO_EnbIn  (0)
#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)

/* Generate the preamble required for initial synchronization and
   a few older transceivers. */
static void mdio_sync(void __iomem *mdio_addr)
{
	int bits = 32;

	/* Establish sync by sending at least 32 logic ones. */
	while (--bits >= 0) {
		iowrite8(MDIO_WRITE1, mdio_addr);
		mdio_delay();
		iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
}

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl;
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	int i, retval = 0;

	if (np->mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite8(dataval, mdio_addr);
		mdio_delay();
		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 19; i > 0; i--) {
		iowrite8(MDIO_EnbIn, mdio_addr);
		mdio_delay();
		retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	return (retval>>1) & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl;
	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
	int i;

	if (np->mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite8(dataval, mdio_addr);
		mdio_delay();
		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	/* Clear out extra bits. */
	for (i = 2; i > 0; i--) {
		iowrite8(MDIO_EnbIn, mdio_addr);
		mdio_delay();
		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	return;
}

static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int i;

	/* Do we need to reset the chip??? */

	i = request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev);
	if (i)
		return i;

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
			   dev->name, dev->irq);
	init_ring(dev);

	iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
	/* The Tx list pointer is written as packets are queued. */

	/* Initialize other registers. */
	__set_mac_addr(dev);
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
#else
	iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
#endif
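	/* MaxFrameSize = MTU + 14-byte Ethernet header, plus 4 more bytes
	 * of headroom for the 802.1Q VLAN tag when VLAN support is built. */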
	if (dev->mtu > 2047)
		iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);

	/* Configure the PCI bus bursts and FIFO thresholds. */

	if (dev->if_port == 0)
		dev->if_port = np->default_port;

	spin_lock_init(&np->mcastlock);

	set_rx_mode(dev);
	iowrite16(0, ioaddr + IntrEnable);
	iowrite16(0, ioaddr + DownCounter);
	/* Set the chip to poll every N*320nsec. */
	iowrite8(100, ioaddr + RxDMAPollPeriod);
	iowrite8(127, ioaddr + TxDMAPollPeriod);
	/* Fix DFE-580TX packet drop issue */
	if (np->pci_rev_id >= 0x14)
		iowrite8(0x01, ioaddr + DebugCtrl1);
	netif_start_queue(dev);

	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
			   "MAC Control %x, %4.4x %4.4x.\n",
			   dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
			   ioread32(ioaddr + MACCtrl0),
			   ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = jiffies + 3*HZ;
	np->timer.data = (unsigned long)dev;
	np->timer.function = &netdev_timer;	/* timer handler */
	add_timer(&np->timer);

	/* Enable interrupts by setting the interrupt mask. */
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);

	return 0;
}

static void check_duplex(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
	int negotiated = mii_lpa & np->mii_if.advertising;
	int duplex;

	/* Force media */
	if (!np->an_enable || mii_lpa == 0xffff) {
		if (np->mii_if.full_duplex)
			iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
				ioaddr + MACCtrl0);
		return;
	}

	/* Autonegotiation */
	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
	if (np->mii_if.full_duplex != duplex) {
		np->mii_if.full_duplex = duplex;
		if (netif_msg_link(np))
			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
				   "negotiated capability %4.4x.\n", dev->name,
				   duplex ? "full" : "half", np->phys[0], negotiated);
		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
	}
}

static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int next_tick = 10*HZ;

	if (netif_msg_timer(np)) {
		printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
			   "Tx %x Rx %x.\n",
			   dev->name, ioread16(ioaddr + IntrEnable),
			   ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
	}
	check_duplex(dev);
	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}

static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	unsigned long flag;

	netif_stop_queue(dev);
	tasklet_disable(&np->tx_tasklet);
	iowrite16(0, ioaddr + IntrEnable);
	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
		   "TxFrameId %2.2x,"
		   " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
		   ioread8(ioaddr + TxFrameId));

	{
		int i;
		for (i=0; i<TX_RING_SIZE; i++) {
			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
				le32_to_cpu(np->tx_ring[i].next_desc),
				le32_to_cpu(np->tx_ring[i].status),
				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				le32_to_cpu(np->tx_ring[i].frag[0].length));
		}
		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
			ioread32(np->base + TxListPtr),
			netif_queue_stopped(dev));
		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
			np->cur_tx, np->cur_tx % TX_RING_SIZE,
			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
	}
	spin_lock_irqsave(&np->lock, flag);

	/* Stop and restart the chip's Tx processes. */
	reset_tx(dev);
	spin_unlock_irqrestore(&np->lock, flag);

	dev->if_port = 0;

	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
		netif_wake_queue(dev);
	}
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
	tasklet_enable(&np->tx_tasklet);
}


/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->cur_rx = np->cur_tx = 0;
	np->dirty_rx = np->dirty_tx = 0;
	np->cur_task = 0;

	np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
		np->rx_ring[i].status = 0;
		np->rx_ring[i].frag[0].length = 0;
		np->rx_skbuff[i] = NULL;
	}

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;		/* Mark as being used by this device. */
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		np->rx_ring[i].frag[0].addr = cpu_to_le32(
			pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz,
				PCI_DMA_FROMDEVICE));
		np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
	}
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
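	/* If every allocation above succeeded (i == RX_RING_SIZE), dirty_rx
	 * wraps to 0; if some failed, the unsigned arithmetic leaves
	 * cur_rx - dirty_rx nonzero modulo the ring size, so refill_rx()
	 * will retry the empty slots on a later pass. */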

	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = 0;
	}
	return;
}

static void tx_poll (unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	unsigned head = np->cur_task % TX_RING_SIZE;
	struct netdev_desc *txdesc =
		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];

	/* Chain the next pointer */
	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
		int entry = np->cur_task % TX_RING_SIZE;
		txdesc = &np->tx_ring[entry];
		if (np->last_tx) {
			np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
				entry*sizeof(struct netdev_desc));
		}
		np->last_tx = txdesc;
	}
	/* Indicate the latest descriptor of tx ring */
	txdesc->status |= cpu_to_le32(DescIntrOnTx);

	if (ioread32 (np->base + TxListPtr) == 0)
		iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
			np->base + TxListPtr);
	return;
}

static int
start_tx (struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct netdev_desc *txdesc;
	unsigned entry;

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;
	np->tx_skbuff[entry] = skb;
	txdesc = &np->tx_ring[entry];

	txdesc->next_desc = 0;
	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
	txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
							skb->len,
							PCI_DMA_TODEVICE));
	txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);

	/* Increment cur_tx before tasklet_schedule() */
	np->cur_tx++;
	mb();
	/* Schedule a tx_poll() task */
	tasklet_schedule(&np->tx_tasklet);

	/* On some architectures: explicitly flush cache lines here. */
	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1
			&& !netif_queue_stopped(dev)) {
		/* do nothing */
	} else {
		netif_stop_queue (dev);
	}
	dev->trans_start = jiffies;
	if (netif_msg_tx_queued(np)) {
		printk (KERN_DEBUG
			"%s: Transmit frame #%d queued in slot %d.\n",
			dev->name, np->cur_tx, entry);
	}
	return 0;
}

/* Reset hardware tx and free all of tx buffers */
static int
reset_tx (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int i;
	int irq = in_interrupt();

	/* Reset tx logic, TxListPtr will be cleaned */
	iowrite16 (TxDisable, ioaddr + MACCtrl1);
	iowrite16 (TxReset | DMAReset | FIFOReset | NetworkReset,
			ioaddr + ASICCtrl + 2);
	for (i=50; i > 0; i--) {
		if ((ioread16(ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
			break;
		mdelay(1);
	}
	/* free all tx skbuff */
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = np->tx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
				np->tx_ring[i].frag[0].addr, skb->len,
				PCI_DMA_TODEVICE);
			if (irq)
				dev_kfree_skb_irq (skb);
			else
				dev_kfree_skb (skb);
			np->tx_skbuff[i] = NULL;
			np->stats.tx_dropped++;
		}
	}
	np->cur_tx = np->dirty_tx = 0;
	np->cur_task = 0;
	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
	return 0;
}

/* The interrupt handler cleans up after the Tx thread
   and schedules the Rx thread to run. */
static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int hw_frame_id;
	int tx_cnt;
	int tx_status;
	int handled = 0;


	do {
		int intr_status = ioread16(ioaddr + IntrStatus);
		iowrite16(intr_status, ioaddr + IntrStatus);

		if (netif_msg_intr(np))
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
				   dev->name, intr_status);

		if (!(intr_status & DEFAULT_INTR))
			break;

		handled = 1;

		if (intr_status & (IntrRxDMADone)) {
			iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
					ioaddr + IntrEnable);
			if (np->budget < 0)
				np->budget = RX_BUDGET;
			tasklet_schedule(&np->rx_tasklet);
		}
		if (intr_status & (IntrTxDone | IntrDrvRqst)) {
			tx_status = ioread16 (ioaddr + TxStatus);
			for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
				if (netif_msg_tx_done(np))
					printk
						("%s: Transmit status is %2.2x.\n",
					dev->name, tx_status);
				if (tx_status & 0x1e) {
					if (netif_msg_tx_err(np))
						printk("%s: Transmit error status %4.4x.\n",
							   dev->name, tx_status);
					np->stats.tx_errors++;
					if (tx_status & 0x10)
						np->stats.tx_fifo_errors++;
					if (tx_status & 0x08)
						np->stats.collisions++;
					if (tx_status & 0x04)
						np->stats.tx_fifo_errors++;
					if (tx_status & 0x02)
						np->stats.tx_window_errors++;
					/*
					** This reset has been verified on
					** DFE-580TX boards ! phdm@macqel.be.
					*/
					if (tx_status & 0x10) {	/* TxUnderrun */
						unsigned short txthreshold;

						txthreshold = ioread16 (ioaddr + TxStartThresh);
						/* Restart Tx FIFO and transmitter */
						sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
						iowrite16 (txthreshold, ioaddr + TxStartThresh);
						/* No need to reset the Tx pointer here */
					}
					/* Restart the Tx. */
					iowrite16 (TxEnable, ioaddr + MACCtrl1);
				}
				/* Yup, this is a documentation bug.  It cost me *hours*. */
				iowrite16 (0, ioaddr + TxStatus);
				if (tx_cnt < 0) {
					iowrite32(5000, ioaddr + DownCounter);
					break;
				}
				tx_status = ioread16 (ioaddr + TxStatus);
			}
			hw_frame_id = (tx_status >> 8) & 0xff;
		} else {
			hw_frame_id = ioread8(ioaddr + TxFrameId);
		}

		if (np->pci_rev_id >= 0x14) {
			spin_lock(&np->lock);
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
				struct sk_buff *skb;
				int sw_frame_id;
				sw_frame_id = (le32_to_cpu(
					np->tx_ring[entry].status) >> 2) & 0xff;
				if (sw_frame_id == hw_frame_id &&
					!(le32_to_cpu(np->tx_ring[entry].status)
					& 0x00010000))
						break;
				if (sw_frame_id == (hw_frame_id + 1) %
					TX_RING_SIZE)
						break;
				skb = np->tx_skbuff[entry];
				/* Free the original skb. */
				pci_unmap_single(np->pci_dev,
					np->tx_ring[entry].frag[0].addr,
					skb->len, PCI_DMA_TODEVICE);
				dev_kfree_skb_irq (np->tx_skbuff[entry]);
				np->tx_skbuff[entry] = NULL;
				np->tx_ring[entry].frag[0].addr = 0;
				np->tx_ring[entry].frag[0].length = 0;
			}
			spin_unlock(&np->lock);
		} else {
			spin_lock(&np->lock);
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
				struct sk_buff *skb;
				if (!(le32_to_cpu(np->tx_ring[entry].status)
					& 0x00010000))
						break;
				skb = np->tx_skbuff[entry];
				/* Free the original skb. */
				pci_unmap_single(np->pci_dev,
					np->tx_ring[entry].frag[0].addr,
					skb->len, PCI_DMA_TODEVICE);
				dev_kfree_skb_irq (np->tx_skbuff[entry]);
				np->tx_skbuff[entry] = NULL;
				np->tx_ring[entry].frag[0].addr = 0;
				np->tx_ring[entry].frag[0].length = 0;
			}
			spin_unlock(&np->lock);
		}

		if (netif_queue_stopped(dev) &&
			np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
			/* The ring is no longer full, clear busy flag. */
			netif_wake_queue (dev);
		}
		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
			netdev_error(dev, intr_status);
	} while (0);
	if (netif_msg_intr(np))
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			   dev->name, ioread16(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}

static void rx_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int boguscnt = np->budget;
	void __iomem *ioaddr = np->base;
	int received = 0;

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (1) {
		struct netdev_desc *desc = &(np->rx_ring[entry]);
		u32 frame_status = le32_to_cpu(desc->status);
		int pkt_len;

		if (--boguscnt < 0) {
			goto not_done;
		}
		if (!(frame_status & DescOwn))
			break;
		pkt_len = frame_status & 0x1fff;	/* Chip omits the CRC. */
		if (netif_msg_rx_status(np))
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
				   frame_status);
		if (frame_status & 0x001f4000) {
			/* There was an error. */
			if (netif_msg_rx_err(np))
				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
					   frame_status);
			np->stats.rx_errors++;
			if (frame_status & 0x00100000) np->stats.rx_length_errors++;
			if (frame_status & 0x00010000) np->stats.rx_fifo_errors++;
			if (frame_status & 0x00060000) np->stats.rx_frame_errors++;
			if (frame_status & 0x00080000) np->stats.rx_crc_errors++;
			if (frame_status & 0x00100000) {
				printk(KERN_WARNING "%s: Oversized Ethernet frame,"
					   " status %8.8x.\n",
					   dev->name, frame_status);
			}
		} else {
			struct sk_buff *skb;
#ifndef final_version
			if (netif_msg_rx_status(np))
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
					   ", bogus_cnt %d.\n",
					   pkt_len, boguscnt);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(np->pci_dev,
							    desc->frag[0].addr,
							    np->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);

				eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
				pci_dma_sync_single_for_device(np->pci_dev,
							       desc->frag[0].addr,
							       np->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
				skb_put(skb, pkt_len);
			} else {
				pci_unmap_single(np->pci_dev,
					desc->frag[0].addr,
					np->rx_buf_sz,
					PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
			netif_rx(skb);
			dev->last_rx = jiffies;
		}
		entry = (entry + 1) % RX_RING_SIZE;
		received++;
	}
	np->cur_rx = entry;
	refill_rx (dev);
	np->budget -= received;
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
	return;

not_done:
	np->cur_rx = entry;
	refill_rx (dev);
	if (!received)
		received = 1;
	np->budget -= received;
	if (np->budget <= 0)
		np->budget = RX_BUDGET;
	tasklet_schedule(&np->rx_tasklet);
	return;
}

static void refill_rx (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry;
	int cnt = 0;

	/* Refill the Rx ring buffers. */
	for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;		/* Better luck next round. */
			skb->dev = dev;		/* Mark as being used by this device. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
				pci_map_single(np->pci_dev, skb->data,
					np->rx_buf_sz, PCI_DMA_FROMDEVICE));
		}
		/* Perhaps we need not reset this field. */
		np->rx_ring[entry].frag[0].length =
			cpu_to_le32(np->rx_buf_sz | LastFrag);
		np->rx_ring[entry].status = 0;
		cnt++;
	}
	return;
}

static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 mii_ctl, mii_advertise, mii_lpa;
	int speed;

	if (intr_status & LinkChange) {
		if (np->an_enable) {
			mii_advertise = mdio_read (dev, np->phys[0], MII_ADVERTISE);
			mii_lpa = mdio_read (dev, np->phys[0], MII_LPA);
			mii_advertise &= mii_lpa;
			printk (KERN_INFO "%s: Link changed: ", dev->name);
			if (mii_advertise & ADVERTISE_100FULL) {
				np->speed = 100;
				printk ("100Mbps, full duplex\n");
			} else if (mii_advertise & ADVERTISE_100HALF) {
				np->speed = 100;
				printk ("100Mbps, half duplex\n");
			} else if (mii_advertise & ADVERTISE_10FULL) {
				np->speed = 10;
				printk ("10Mbps, full duplex\n");
			} else if (mii_advertise & ADVERTISE_10HALF) {
				np->speed = 10;
				printk ("10Mbps, half duplex\n");
			} else
				printk ("\n");

		} else {
			mii_ctl = mdio_read (dev, np->phys[0], MII_BMCR);
			speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
			np->speed = speed;
			printk (KERN_INFO "%s: Link changed: %dMbps, ",
				dev->name, speed);
			printk ("%s duplex.\n", (mii_ctl & BMCR_FULLDPLX) ?
				"full" : "half");
		}
		check_duplex (dev);
		if (np->flowctrl && np->mii_if.full_duplex) {
			iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
				ioaddr + MulticastFilter1+2);
			iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
				ioaddr + MACCtrl0);
		}
	}
	if (intr_status & StatsMax) {
		get_stats(dev);
	}
	if (intr_status & IntrPCIErr) {
		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
			   dev->name, intr_status);
		/* We must do a global reset of DMA to continue. */
	}
}

static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int i;

	/* We should lock this segment of code for SMP eventually, although
	   the vulnerability window is very small and statistics are
	   non-critical. */
	/* The chip only needs to report frames silently dropped. */
	np->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
	np->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
	np->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
	np->stats.collisions += ioread8(ioaddr + StatsLateColl);
	np->stats.collisions += ioread8(ioaddr + StatsMultiColl);
	np->stats.collisions += ioread8(ioaddr + StatsOneColl);
	np->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
	ioread8(ioaddr + StatsTxDefer);
	for (i = StatsTxDefer; i <= StatsMcastRx; i++)
		ioread8(ioaddr + i);
	np->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
	np->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
	np->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
	np->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;

	return &np->stats;
}

static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 mc_filter[4];			/* Multicast hash filter */
	u32 rx_mode;
	int i;

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		/* Unconditionally log net taps. */
		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
	} else if ((dev->mc_count > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	} else if (dev->mc_count) {
		struct dev_mc_list *mclist;
		int bit;
		int index;
		int crc;
		memset (mc_filter, 0, sizeof (mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
			for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
				if (crc & 0x80000000) index |= 1 << bit;
			mc_filter[index/16] |= (1 << (index % 16));
		}
		rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
	} else {
		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
		return;
	}
	if (np->mii_if.full_duplex && np->flowctrl)
		mc_filter[3] |= 0x0200;

	for (i = 0; i < 4; i++)
		iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
	iowrite8(rx_mode, ioaddr + RxMode);
}

static int __set_mac_addr(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u16 addr16;

	addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
	iowrite16(addr16, np->base + StationAddr);
	addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
	iowrite16(addr16, np->base + StationAddr+2);
	addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
	iowrite16(addr16, np->base + StationAddr+4);
	return 0;
}

static int check_if_running(struct net_device *dev)
{
	if (!netif_running(dev))
		return -EINVAL;
	return 0;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(np->pci_dev));
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	mii_ethtool_gset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	return 0;
}

static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;
	spin_lock_irq(&np->lock);
	res = mii_ethtool_sset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	return res;
}

static int nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii_if);
}

static u32 get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii_if);
}

static u32 get_msglevel(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return np->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct netdev_private *np = netdev_priv(dev);
	np->msg_enable = val;
}

static struct ethtool_ops ethtool_ops = {
	.begin = check_if_running,
	.get_drvinfo = get_drvinfo,
	.get_settings = get_settings,
	.set_settings = set_settings,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_perm_addr = ethtool_op_get_perm_addr,
};

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int rc;
	int i;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&np->lock);
	switch (cmd) {
	case SIOCDEVPRIVATE:
		for (i=0; i<TX_RING_SIZE; i++) {
			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
				le32_to_cpu(np->tx_ring[i].next_desc),
				le32_to_cpu(np->tx_ring[i].status),
				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				le32_to_cpu(np->tx_ring[i].frag[0].length));
		}
		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
			ioread32(np->base + TxListPtr),
			netif_queue_stopped(dev));
		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
			np->cur_tx, np->cur_tx % TX_RING_SIZE,
			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
		return 0;
	}


	return rc;
}

static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int i;

	netif_stop_queue(dev);

	if (netif_msg_ifdown(np)) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
			   "Rx %4.4x Int %2.2x.\n",
			   dev->name, ioread8(ioaddr + TxStatus),
			   ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite16(0x0000, ioaddr + IntrEnable);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);

	/* Wait and kill tasklet */
	tasklet_kill(&np->rx_tasklet);
	tasklet_kill(&np->tx_tasklet);

#ifdef __i386__
	if (netif_msg_hw(np)) {
		printk("\n"KERN_DEBUG"  Tx ring at %8.8x:\n",
			   (int)(np->tx_ring_dma));
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" #%d desc. %4.4x %8.8x %8.8x.\n",
				   i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
				   np->tx_ring[i].frag[0].length);
		printk("\n"KERN_DEBUG "  Rx ring %8.8x:\n",
			   (int)(np->rx_ring_dma));
		for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
				   i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
				   np->rx_ring[i].frag[0].length);
		}
	}
#endif /* __i386__ debugging only */

	free_irq(dev->irq, dev);

	del_timer_sync(&np->timer);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		skb = np->rx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
				np->rx_ring[i].frag[0].addr, np->rx_buf_sz,
				PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
			np->rx_skbuff[i] = NULL;
		}
		/* Poison the address only after the unmap above has used it. */
		np->rx_ring[i].frag[0].addr = 0xBADF00D0;	/* An invalid address. */
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = np->tx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
				np->tx_ring[i].frag[0].addr, skb->len,
				PCI_DMA_TODEVICE);
			dev_kfree_skb(skb);
			np->tx_skbuff[i] = NULL;
		}
	}

	return 0;
}

static void __devexit sundance_remove1 (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);

		unregister_netdev(dev);
		pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
			np->rx_ring_dma);
		pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
			np->tx_ring_dma);
		pci_iounmap(pdev, np->base);
		pci_release_regions(pdev);
		free_netdev(dev);
		pci_set_drvdata(pdev, NULL);
	}
}

static struct pci_driver sundance_driver = {
	.name		= DRV_NAME,
	.id_table	= sundance_pci_tbl,
	.probe		= sundance_probe1,
	.remove		= __devexit_p(sundance_remove1),
};

static int __init sundance_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif
	return pci_module_init(&sundance_driver);
}

static void __exit sundance_exit(void)
{
	pci_unregister_driver(&sundance_driver);
}

module_init(sundance_init);
module_exit(sundance_exit);