[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers/net/ethernet/dlink/sundance.c
1/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
2/*
3 Written 1999-2000 by Donald Becker.
4
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
11
12 The author may be reached as becker@scyld.com, or C/O
13 Scyld Computing Corporation
14 410 Severn Ave., Suite 210
15 Annapolis MD 21403
16
17 Support and updates available at
18 http://www.scyld.com/network/sundance.html
03a8c661 19 [link no longer provides useful info -jgarzik]
20 Archives of the mailing list are still available at
21 http://www.beowulf.org/pipermail/netdrivers/
1da177e4 22
23*/
24
25#define DRV_NAME "sundance"
26#define DRV_VERSION "1.2"
27#define DRV_RELDATE "11-Sep-2006"
28
29
30/* The user-configurable values.
31 These may be modified when a driver module is loaded.*/
32static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
33/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
34 Typical is a 64 element hash table based on the Ethernet CRC. */
f71e1309 35static const int multicast_filter_limit = 32;
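/* Editor's illustrative sketch (assumed helper, not part of this driver):
   how a 64-entry multicast hash index is derived from the Ethernet CRC,
   mirroring what set_rx_mode() does further below. */
#if 0
static int multicast_hash_index(const u8 *addr)
{
	int bit, index = 0;
	int crc = ether_crc_le(ETH_ALEN, addr);

	/* Use the top 6 bits of the little-endian CRC, MSB first. */
	for (bit = 0; bit < 6; bit++, crc <<= 1)
		if (crc & 0x80000000)
			index |= 1 << bit;
	return index;	/* bit position within MulticastFilter0..1 */
}
#endif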
36
37/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
38 Setting to > 1518 effectively disables this feature.
39 This chip can receive into offset buffers, so the Alpha does not
40 need a copy-align. */
41static int rx_copybreak;
42static int flowctrl=1;
43
44/* media[] specifies the media type the NIC operates at.
45 autosense Autosensing active media.
46 10mbps_hd 10Mbps half duplex.
47 10mbps_fd 10Mbps full duplex.
48 100mbps_hd 100Mbps half duplex.
49 100mbps_fd 100Mbps full duplex.
50 0 Autosensing active media.
51 1 10Mbps half duplex.
52 2 10Mbps full duplex.
53 3 100Mbps half duplex.
54 4 100Mbps full duplex.
55*/
56#define MAX_UNITS 8
57static char *media[MAX_UNITS];
58
59
60/* Operational parameters that are set at compile time. */
61
62/* Keep the ring sizes a power of two for compile efficiency.
63 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
64 Making the Tx ring too large decreases the effectiveness of channel
65 bonding and packet priority, and more than 128 requires modifying the
66 Tx error recovery.
67 Large receive rings merely waste memory. */
68#define TX_RING_SIZE 32
69#define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */
70#define RX_RING_SIZE 64
71#define RX_BUDGET 32
72#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
73#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc)
74
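/* Editor's sketch (assumption: TX_RING_SIZE stays a power of two): the
   ring-index modulo used throughout this driver reduces to a bit mask. */
#if 0
static unsigned int tx_ring_slot(unsigned int cur_tx)
{
	/* Equivalent to cur_tx % TX_RING_SIZE when the size is 2^N. */
	return cur_tx & (TX_RING_SIZE - 1);
}
#endif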
75/* Operational parameters that usually are not changed. */
76/* Time in jiffies before concluding the transmitter is hung. */
77#define TX_TIMEOUT (4*HZ)
78#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
79
80/* Include files, designed to support most kernel versions 2.0.0 and later. */
81#include <linux/module.h>
82#include <linux/kernel.h>
83#include <linux/string.h>
84#include <linux/timer.h>
85#include <linux/errno.h>
86#include <linux/ioport.h>
87#include <linux/interrupt.h>
88#include <linux/pci.h>
89#include <linux/netdevice.h>
90#include <linux/etherdevice.h>
91#include <linux/skbuff.h>
92#include <linux/init.h>
93#include <linux/bitops.h>
94#include <asm/uaccess.h>
95#include <asm/processor.h> /* Processor type for cache alignment. */
96#include <asm/io.h>
97#include <linux/delay.h>
98#include <linux/spinlock.h>
0c8a745f 99#include <linux/dma-mapping.h>
100#include <linux/crc32.h>
101#include <linux/ethtool.h>
102#include <linux/mii.h>
103
104/* These identify the driver base version and may not be removed. */
64bc40de 105static const char version[] =
106 KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
107 " Written by Donald Becker\n";
108
109MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
110MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
111MODULE_LICENSE("GPL");
112
113module_param(debug, int, 0);
114module_param(rx_copybreak, int, 0);
115module_param_array(media, charp, NULL, 0);
116module_param(flowctrl, int, 0);
117MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
118MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
119MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
120
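/* Editor's usage note (hypothetical values): the parameters above are set
 * at module load time, e.g.
 *
 *	modprobe sundance debug=2 flowctrl=1 rx_copybreak=100 media=100mbps_fd
 *
 * media[] takes one entry per adapter, up to MAX_UNITS cards.
 */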
121/*
122 Theory of Operation
123
124I. Board Compatibility
125
126This driver is designed for the Sundance Technologies "Alta" ST201 chip.
127
128II. Board-specific settings
129
130III. Driver operation
131
132IIIa. Ring buffers
133
134This driver uses two statically allocated fixed-size descriptor lists
135formed into rings by a branch from the final descriptor to the beginning of
136the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
137Some chips explicitly use only 2^N sized rings, while others use a
138'next descriptor' pointer that the driver forms into rings.
139
140IIIb/c. Transmit/Receive Structure
141
142This driver uses a zero-copy receive and transmit scheme.
143The driver allocates full frame size skbuffs for the Rx ring buffers at
144open() time and passes the skb->data field to the chip as receive data
145buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
146a fresh skbuff is allocated and the frame is copied to the new skbuff.
147When the incoming frame is larger, the skbuff is passed directly up the
148protocol stack. Buffers consumed this way are replaced by newly allocated
149skbuffs in a later phase of receives.
150
151The RX_COPYBREAK value is chosen to trade-off the memory wasted by
152using a full-sized skbuff for small frames vs. the copying costs of larger
153frames. New boards are typically used in generously configured machines
154and the underfilled buffers have negligible impact compared to the benefit of
155a single allocation size, so the default value of zero results in never
156copying packets. When copying is done, the cost is usually mitigated by using
157a combined copy/checksum routine. Copying also preloads the cache, which is
158most useful with small frames.
159
160A subtle aspect of the operation is that the IP header at offset 14 in an
161ethernet frame isn't longword aligned for further processing.
162Unaligned buffers are permitted by the Sundance hardware, so
163frames are received into the skbuff at an offset of "+2", 16-byte aligning
164the IP header.
165
166IIId. Synchronization
167
168The driver runs as two independent, single-threaded flows of control. One
169is the send-packet routine, which enforces single-threaded use by the
170dev->tbusy flag. The other thread is the interrupt handler, which is single
171threaded by the hardware and interrupt handling software.
172
173The send packet thread has partial control over the Tx ring and 'dev->tbusy'
174flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
175queue slot is empty, it clears the tbusy flag when finished otherwise it sets
176the 'lp->tx_full' flag.
177
178The interrupt handler has exclusive control over the Rx ring and records stats
179from the Tx ring. After reaping the stats, it marks the Tx queue entry as
180empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
181clears both the tx_full and tbusy flags.
182
183IV. Notes
184
185IVb. References
186
187The Sundance ST201 datasheet, preliminary version.
188The Kendin KS8723 datasheet, preliminary version.
189The ICplus IP100 datasheet, preliminary version.
190http://www.scyld.com/expert/100mbps.html
191http://www.scyld.com/expert/NWay.html
192
193IVc. Errata
194
195*/
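/* Editor's illustrative sketch (not driver code): the "+2" receive offset
 * described in IIIb/c above.  Reserving two bytes ahead of the 14-byte
 * Ethernet header leaves the IP header 16-byte aligned; init_ring() and
 * refill_rx() below allocate their Rx buffers exactly this way. */
#if 0
static struct sk_buff *alloc_aligned_rx_skb(struct net_device *dev, int bufsz)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, bufsz + 2);

	if (skb)
		skb_reserve(skb, 2);	/* 14 + 2 = 16: IP header aligned */
	return skb;
}
#endif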
196
197/* Work-around for Kendin chip bugs. */
198#ifndef CONFIG_SUNDANCE_MMIO
199#define USE_IO_OPS 1
200#endif
201
a3aa1884 202static DEFINE_PCI_DEVICE_TABLE(sundance_pci_tbl) = {
203 { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
204 { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
205 { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
206 { 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
207 { 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
208 { 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
209 { 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
210 { }
211};
212MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
213
214enum {
215 netdev_io_size = 128
216};
217
218struct pci_id_info {
219 const char *name;
220};
64bc40de 221static const struct pci_id_info pci_id_tbl[] = {
222 {"D-Link DFE-550TX FAST Ethernet Adapter"},
223 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
224 {"D-Link DFE-580TX 4 port Server Adapter"},
225 {"D-Link DFE-530TXS FAST Ethernet Adapter"},
226 {"D-Link DL10050-based FAST Ethernet Adapter"},
227 {"Sundance Technology Alta"},
1668b19f 228 {"IC Plus Corporation IP100A FAST Ethernet Adapter"},
46009c8b 229 { } /* terminate list. */
230};
231
232/* This driver was written to use PCI memory space, however x86-oriented
233 hardware often uses I/O space accesses. */
234
235/* Offsets to the device registers.
236 Unlike software-only systems, device drivers interact with complex hardware.
237 It's not useful to define symbolic names for every register bit in the
238 device. The name can only partially document the semantics and make
239 the driver longer and more difficult to read.
240 In general, only the important configuration values or bits changed
241 multiple times should be defined symbolically.
242*/
243enum alta_offsets {
244 DMACtrl = 0x00,
245 TxListPtr = 0x04,
246 TxDMABurstThresh = 0x08,
247 TxDMAUrgentThresh = 0x09,
248 TxDMAPollPeriod = 0x0a,
249 RxDMAStatus = 0x0c,
250 RxListPtr = 0x10,
251 DebugCtrl0 = 0x1a,
252 DebugCtrl1 = 0x1c,
253 RxDMABurstThresh = 0x14,
254 RxDMAUrgentThresh = 0x15,
255 RxDMAPollPeriod = 0x16,
256 LEDCtrl = 0x1a,
257 ASICCtrl = 0x30,
258 EEData = 0x34,
259 EECtrl = 0x36,
260 FlashAddr = 0x40,
261 FlashData = 0x44,
f210e87b 262 WakeEvent = 0x45,
263 TxStatus = 0x46,
264 TxFrameId = 0x47,
265 DownCounter = 0x18,
266 IntrClear = 0x4a,
267 IntrEnable = 0x4c,
268 IntrStatus = 0x4e,
269 MACCtrl0 = 0x50,
270 MACCtrl1 = 0x52,
271 StationAddr = 0x54,
272 MaxFrameSize = 0x5A,
273 RxMode = 0x5c,
274 MIICtrl = 0x5e,
275 MulticastFilter0 = 0x60,
276 MulticastFilter1 = 0x64,
277 RxOctetsLow = 0x68,
278 RxOctetsHigh = 0x6a,
279 TxOctetsLow = 0x6c,
280 TxOctetsHigh = 0x6e,
281 TxFramesOK = 0x70,
282 RxFramesOK = 0x72,
283 StatsCarrierError = 0x74,
284 StatsLateColl = 0x75,
285 StatsMultiColl = 0x76,
286 StatsOneColl = 0x77,
287 StatsTxDefer = 0x78,
288 RxMissed = 0x79,
289 StatsTxXSDefer = 0x7a,
290 StatsTxAbort = 0x7b,
291 StatsBcastTx = 0x7c,
292 StatsBcastRx = 0x7d,
293 StatsMcastTx = 0x7e,
294 StatsMcastRx = 0x7f,
295 /* Aliased and bogus values! */
296 RxStatus = 0x0c,
297};
298
299#define ASIC_HI_WORD(x) ((x) + 2)
300
301enum ASICCtrl_HiWord_bit {
302 GlobalReset = 0x0001,
303 RxReset = 0x0002,
304 TxReset = 0x0004,
305 DMAReset = 0x0008,
306 FIFOReset = 0x0010,
307 NetworkReset = 0x0020,
308 HostReset = 0x0040,
309 ResetBusy = 0x0400,
310};
311
312/* Bits in the interrupt status/mask registers. */
313enum intr_status_bits {
314 IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
315 IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
316 IntrDrvRqst=0x0040,
317 StatsMax=0x0080, LinkChange=0x0100,
318 IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
319};
320
321/* Bits in the RxMode register. */
322enum rx_mode_bits {
323 AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
324 AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
325};
326/* Bits in MACCtrl. */
327enum mac_ctrl0_bits {
328 EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
329 EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
330};
331enum mac_ctrl1_bits {
332 StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
333 TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
334 RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
335};
336
337/* Bits in WakeEvent register. */
338enum wake_event_bits {
339 WakePktEnable = 0x01,
340 MagicPktEnable = 0x02,
341 LinkEventEnable = 0x04,
342 WolEnable = 0x80,
343};
344
345/* The Rx and Tx buffer descriptors. */
346/* Note that using only 32 bit fields simplifies conversion to big-endian
347 architectures. */
348struct netdev_desc {
349 __le32 next_desc;
350 __le32 status;
351 struct desc_frag { __le32 addr, length; } frag[1];
352};
353
354/* Bits in netdev_desc.status */
355enum desc_status_bits {
356 DescOwn=0x8000,
357 DescEndPacket=0x4000,
358 DescEndRing=0x2000,
359 LastFrag=0x80000000,
360 DescIntrOnTx=0x8000,
361 DescIntrOnDMADone=0x80000000,
362 DisableAlign = 0x00000001,
363};
364
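/* Editor's sketch (assumed helper): filling a single-fragment Tx descriptor
 * the way start_tx() does below; the frame id lives in status bits 2..9
 * and LastFrag terminates the fragment list. */
#if 0
static void fill_tx_desc(struct netdev_desc *txdesc, dma_addr_t buf,
			 unsigned int len, unsigned int entry)
{
	txdesc->next_desc = 0;
	txdesc->status = cpu_to_le32((entry << 2) | DisableAlign);
	txdesc->frag[0].addr = cpu_to_le32(buf);
	txdesc->frag[0].length = cpu_to_le32(len | LastFrag);
}
#endif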
365#define PRIV_ALIGN 15 /* Required alignment mask */
366/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
367 within the structure. */
368#define MII_CNT 4
369struct netdev_private {
370 /* Descriptor rings first for alignment. */
371 struct netdev_desc *rx_ring;
372 struct netdev_desc *tx_ring;
373 struct sk_buff* rx_skbuff[RX_RING_SIZE];
374 struct sk_buff* tx_skbuff[TX_RING_SIZE];
375 dma_addr_t tx_ring_dma;
376 dma_addr_t rx_ring_dma;
1da177e4 377 struct timer_list timer; /* Media monitoring timer. */
378 /* ethtool extra stats */
379 struct {
380 u64 tx_multiple_collisions;
381 u64 tx_single_collisions;
382 u64 tx_late_collisions;
383 u64 tx_deferred;
384 u64 tx_deferred_excessive;
385 u64 tx_aborted;
386 u64 tx_bcasts;
387 u64 rx_bcasts;
388 u64 tx_mcasts;
389 u64 rx_mcasts;
390 } xstats;
391 /* Frequently used values: keep some adjacent for cache effect. */
392 spinlock_t lock;
393 int msg_enable;
394 int chip_id;
395 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
396 unsigned int rx_buf_sz; /* Based on MTU+slack. */
397 struct netdev_desc *last_tx; /* Last Tx descriptor used. */
398 unsigned int cur_tx, dirty_tx;
399 /* These values keep track of the transceiver/media in use. */
400 unsigned int flowctrl:1;
401 unsigned int default_port:4; /* Last dev->if_port value. */
402 unsigned int an_enable:1;
403 unsigned int speed;
f210e87b 404 unsigned int wol_enabled:1; /* Wake on LAN enabled */
405 struct tasklet_struct rx_tasklet;
406 struct tasklet_struct tx_tasklet;
407 int budget;
408 int cur_task;
409 /* Multicast and receive mode. */
410 spinlock_t mcastlock; /* SMP lock multicast updates. */
411 u16 mcast_filter[4];
412 /* MII transceiver section. */
413 struct mii_if_info mii_if;
414 int mii_preamble_required;
415 unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
416 struct pci_dev *pci_dev;
417 void __iomem *base;
7b738b55 418 spinlock_t statlock;
419};
420
421/* The station address location in the EEPROM. */
422#define EEPROM_SA_OFFSET 0x10
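/* Editor's sketch (hypothetical helper): the station address is stored as
 * three little-endian 16-bit words starting at EEPROM word 0x10, and
 * sundance_probe1() reads it essentially like this. */
#if 0
static void read_mac_from_eeprom(struct net_device *dev, void __iomem *ioaddr)
{
	int i;

	for (i = 0; i < 3; i++)
		((__le16 *)dev->dev_addr)[i] =
			cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
}
#endif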
423#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
424 IntrDrvRqst | IntrTxDone | StatsMax | \
425 LinkChange)
426
427static int change_mtu(struct net_device *dev, int new_mtu);
428static int eeprom_read(void __iomem *ioaddr, int location);
429static int mdio_read(struct net_device *dev, int phy_id, int location);
430static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
50500155 431static int mdio_wait_link(struct net_device *dev, int wait);
432static int netdev_open(struct net_device *dev);
433static void check_duplex(struct net_device *dev);
434static void netdev_timer(unsigned long data);
435static void tx_timeout(struct net_device *dev);
436static void init_ring(struct net_device *dev);
61357325 437static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
1da177e4 438static int reset_tx (struct net_device *dev);
7d12e780 439static irqreturn_t intr_handler(int irq, void *dev_instance);
440static void rx_poll(unsigned long data);
441static void tx_poll(unsigned long data);
442static void refill_rx (struct net_device *dev);
443static void netdev_error(struct net_device *dev, int intr_status);
445static void set_rx_mode(struct net_device *dev);
446static int __set_mac_addr(struct net_device *dev);
4b4f5467 447static int sundance_set_mac_addr(struct net_device *dev, void *data);
448static struct net_device_stats *get_stats(struct net_device *dev);
449static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
450static int netdev_close(struct net_device *dev);
7282d491 451static const struct ethtool_ops ethtool_ops;
1da177e4 452
453static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
454{
455 struct netdev_private *np = netdev_priv(dev);
456 void __iomem *ioaddr = np->base + ASICCtrl;
457 int countdown;
458
459 /* ST201 documentation states ASICCtrl is a 32bit register */
460 iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
461 /* ST201 documentation states reset can take up to 1 ms */
462 countdown = 10 + 1;
463 while (ioread32 (ioaddr) & (ResetBusy << 16)) {
464 if (--countdown == 0) {
465 printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
466 break;
467 }
468 udelay(100);
469 }
470}
471
472static const struct net_device_ops netdev_ops = {
473 .ndo_open = netdev_open,
474 .ndo_stop = netdev_close,
475 .ndo_start_xmit = start_tx,
476 .ndo_get_stats = get_stats,
afc4b13d 477 .ndo_set_rx_mode = set_rx_mode,
478 .ndo_do_ioctl = netdev_ioctl,
479 .ndo_tx_timeout = tx_timeout,
480 .ndo_change_mtu = change_mtu,
4b4f5467 481 .ndo_set_mac_address = sundance_set_mac_addr,
482 .ndo_validate_addr = eth_validate_addr,
483};
484
485static int sundance_probe1(struct pci_dev *pdev,
486 const struct pci_device_id *ent)
487{
488 struct net_device *dev;
489 struct netdev_private *np;
490 static int card_idx;
491 int chip_idx = ent->driver_data;
492 int irq;
493 int i;
494 void __iomem *ioaddr;
495 u16 mii_ctl;
496 void *ring_space;
497 dma_addr_t ring_dma;
498#ifdef USE_IO_OPS
499 int bar = 0;
500#else
501 int bar = 1;
502#endif
ac1d49f8 503 int phy, phy_end, phy_idx = 0;
504
505/* when built into the kernel, we only print version if device is found */
506#ifndef MODULE
507 static int printed_version;
508 if (!printed_version++)
509 printk(version);
510#endif
511
512 if (pci_enable_device(pdev))
513 return -EIO;
514 pci_set_master(pdev);
515
516 irq = pdev->irq;
517
518 dev = alloc_etherdev(sizeof(*np));
519 if (!dev)
520 return -ENOMEM;
521 SET_NETDEV_DEV(dev, &pdev->dev);
522
523 if (pci_request_regions(pdev, DRV_NAME))
524 goto err_out_netdev;
525
526 ioaddr = pci_iomap(pdev, bar, netdev_io_size);
527 if (!ioaddr)
528 goto err_out_res;
529
530 for (i = 0; i < 3; i++)
531 ((__le16 *)dev->dev_addr)[i] =
532 cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
1da177e4 533
534 np = netdev_priv(dev);
535 np->base = ioaddr;
536 np->pci_dev = pdev;
537 np->chip_id = chip_idx;
538 np->msg_enable = (1 << debug) - 1;
539 spin_lock_init(&np->lock);
7b738b55 540 spin_lock_init(&np->statlock);
541 tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
542 tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
543
544 ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
545 &ring_dma, GFP_KERNEL);
546 if (!ring_space)
547 goto err_out_cleardev;
548 np->tx_ring = (struct netdev_desc *)ring_space;
549 np->tx_ring_dma = ring_dma;
550
551 ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
552 &ring_dma, GFP_KERNEL);
553 if (!ring_space)
554 goto err_out_unmap_tx;
555 np->rx_ring = (struct netdev_desc *)ring_space;
556 np->rx_ring_dma = ring_dma;
557
558 np->mii_if.dev = dev;
559 np->mii_if.mdio_read = mdio_read;
560 np->mii_if.mdio_write = mdio_write;
561 np->mii_if.phy_id_mask = 0x1f;
562 np->mii_if.reg_num_mask = 0x1f;
563
564 /* The chip-specific entries in the device structure. */
633a277e 565 dev->netdev_ops = &netdev_ops;
1da177e4 566 SET_ETHTOOL_OPS(dev, &ethtool_ops);
1da177e4 567 dev->watchdog_timeo = TX_TIMEOUT;
633a277e 568
569 pci_set_drvdata(pdev, dev);
570
571 i = register_netdev(dev);
572 if (i)
573 goto err_out_unmap_rx;
574
e174961c 575 printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
0795af57 576 dev->name, pci_id_tbl[chip_idx].name, ioaddr,
e174961c 577 dev->dev_addr, irq);
1da177e4 578
579 np->phys[0] = 1; /* Default setting */
580 np->mii_preamble_required++;
ac1d49f8 581
582 /*
583 * It seems some PHYs don't deal well with address 0 being accessed
584 * first
585 */
586 if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
587 phy = 0;
588 phy_end = 31;
589 } else {
590 phy = 1;
591 phy_end = 32; /* wraps to zero, due to 'phy & 0x1f' */
592 }
593 for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
b06c093e 594 int phyx = phy & 0x1f;
0d615ec2 595 int mii_status = mdio_read(dev, phyx, MII_BMSR);
67ec2f80 596 if (mii_status != 0xffff && mii_status != 0x0000) {
597 np->phys[phy_idx++] = phyx;
598 np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
599 if ((mii_status & 0x0040) == 0)
600 np->mii_preamble_required++;
601 printk(KERN_INFO "%s: MII PHY found at address %d, status "
602 "0x%4.4x advertising %4.4x.\n",
b06c093e 603 dev->name, phyx, mii_status, np->mii_if.advertising);
1da177e4 604 }
605 }
606 np->mii_preamble_required--;
1da177e4 607
608 if (phy_idx == 0) {
609 printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n",
610 dev->name, ioread32(ioaddr + ASICCtrl));
611 goto err_out_unregister;
612 }
613
614 np->mii_if.phy_id = np->phys[0];
615
616 /* Parse override configuration */
617 np->an_enable = 1;
618 if (card_idx < MAX_UNITS) {
619 if (media[card_idx] != NULL) {
620 np->an_enable = 0;
621 if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
622 strcmp (media[card_idx], "4") == 0) {
623 np->speed = 100;
624 np->mii_if.full_duplex = 1;
625 } else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
626 strcmp (media[card_idx], "3") == 0) {
627 np->speed = 100;
628 np->mii_if.full_duplex = 0;
629 } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
630 strcmp (media[card_idx], "2") == 0) {
631 np->speed = 10;
632 np->mii_if.full_duplex = 1;
633 } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
634 strcmp (media[card_idx], "1") == 0) {
635 np->speed = 10;
636 np->mii_if.full_duplex = 0;
637 } else {
638 np->an_enable = 1;
639 }
640 }
641 if (flowctrl == 1)
642 np->flowctrl = 1;
643 }
644
645 /* Fibre PHY? */
646 if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
647 /* Default 100Mbps Full */
648 if (np->an_enable) {
649 np->speed = 100;
650 np->mii_if.full_duplex = 1;
651 np->an_enable = 0;
652 }
653 }
654 /* Reset PHY */
655 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
656 mdelay (300);
657 /* If flow control enabled, we need to advertise it.*/
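	/* Editor's note: 0x0400 is the pause-capable bit (ADVERTISE_PAUSE_CAP)
	 * of the MII advertisement register. */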
658 if (np->flowctrl)
659 mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
660 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
661 /* Force media type */
662 if (!np->an_enable) {
663 mii_ctl = 0;
664 mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
665 mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
666 mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
667 printk (KERN_INFO "Override speed=%d, %s duplex\n",
668 np->speed, np->mii_if.full_duplex ? "Full" : "Half");
669
670 }
671
672 /* Perhaps move the reset here? */
673 /* Reset the chip to erase previous misconfiguration. */
674 if (netif_msg_hw(np))
675 printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
e714d99c 676 sundance_reset(dev, 0x00ff << 16);
677 if (netif_msg_hw(np))
678 printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
679
680 card_idx++;
681 return 0;
682
683err_out_unregister:
684 unregister_netdev(dev);
685err_out_unmap_rx:
686 dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
687 np->rx_ring, np->rx_ring_dma);
1da177e4 688err_out_unmap_tx:
689 dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
690 np->tx_ring, np->tx_ring_dma);
691err_out_cleardev:
692 pci_set_drvdata(pdev, NULL);
693 pci_iounmap(pdev, ioaddr);
694err_out_res:
695 pci_release_regions(pdev);
696err_out_netdev:
697 free_netdev (dev);
698 return -ENODEV;
699}
700
701static int change_mtu(struct net_device *dev, int new_mtu)
702{
703 if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
704 return -EINVAL;
705 if (netif_running(dev))
706 return -EBUSY;
707 dev->mtu = new_mtu;
708 return 0;
709}
710
711#define eeprom_delay(ee_addr) ioread32(ee_addr)
712/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
64bc40de 713static int eeprom_read(void __iomem *ioaddr, int location)
714{
715 int boguscnt = 10000; /* Typical 1900 ticks. */
716 iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
717 do {
718 eeprom_delay(ioaddr + EECtrl);
719 if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
720 return ioread16(ioaddr + EEData);
721 }
722 } while (--boguscnt > 0);
723 return 0;
724}
725
726/* MII transceiver control section.
727 Read and write the MII registers using software-generated serial
728 MDIO protocol. See the MII specifications or DP83840A data sheet
729 for details.
730
731 The maximum data clock rate is 2.5 MHz. The minimum timing is usually
732 met by back-to-back 33 MHz PCI cycles. */
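/* Editor's usage sketch (assumed example, not driver code): typical access
 * through the bit-banged MDIO routines below, here restarting
 * autonegotiation on the first detected PHY. */
#if 0
static void example_restart_autoneg(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int bmsr = mdio_read(dev, np->phys[0], MII_BMSR);

	if (bmsr != 0xffff && bmsr != 0x0000)	/* PHY responded */
		mdio_write(dev, np->phys[0], MII_BMCR,
			   BMCR_ANENABLE | BMCR_ANRESTART);
}
#endif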
733#define mdio_delay() ioread8(mdio_addr)
734
735enum mii_reg_bits {
736 MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
737};
738#define MDIO_EnbIn (0)
739#define MDIO_WRITE0 (MDIO_EnbOutput)
740#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
741
742/* Generate the preamble required for initial synchronization and
743 a few older transceivers. */
744static void mdio_sync(void __iomem *mdio_addr)
745{
746 int bits = 32;
747
748 /* Establish sync by sending at least 32 logic ones. */
749 while (--bits >= 0) {
750 iowrite8(MDIO_WRITE1, mdio_addr);
751 mdio_delay();
752 iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
753 mdio_delay();
754 }
755}
756
757static int mdio_read(struct net_device *dev, int phy_id, int location)
758{
759 struct netdev_private *np = netdev_priv(dev);
760 void __iomem *mdio_addr = np->base + MIICtrl;
761 int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
762 int i, retval = 0;
763
764 if (np->mii_preamble_required)
765 mdio_sync(mdio_addr);
766
767 /* Shift the read command bits out. */
768 for (i = 15; i >= 0; i--) {
769 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
770
771 iowrite8(dataval, mdio_addr);
772 mdio_delay();
773 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
774 mdio_delay();
775 }
776 /* Read the two transition, 16 data, and wire-idle bits. */
777 for (i = 19; i > 0; i--) {
778 iowrite8(MDIO_EnbIn, mdio_addr);
779 mdio_delay();
780 retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
781 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
782 mdio_delay();
783 }
784 return (retval>>1) & 0xffff;
785}
786
787static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
788{
789 struct netdev_private *np = netdev_priv(dev);
790 void __iomem *mdio_addr = np->base + MIICtrl;
791 int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
792 int i;
793
794 if (np->mii_preamble_required)
795 mdio_sync(mdio_addr);
796
797 /* Shift the command bits out. */
798 for (i = 31; i >= 0; i--) {
799 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
800
801 iowrite8(dataval, mdio_addr);
802 mdio_delay();
803 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
804 mdio_delay();
805 }
806 /* Clear out extra bits. */
807 for (i = 2; i > 0; i--) {
808 iowrite8(MDIO_EnbIn, mdio_addr);
809 mdio_delay();
810 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
811 mdio_delay();
812 }
813}
814
815static int mdio_wait_link(struct net_device *dev, int wait)
816{
817 int bmsr;
818 int phy_id;
819 struct netdev_private *np;
820
821 np = netdev_priv(dev);
822 phy_id = np->phys[0];
823
824 do {
825 bmsr = mdio_read(dev, phy_id, MII_BMSR);
826 if (bmsr & 0x0004)
827 return 0;
828 mdelay(1);
829 } while (--wait > 0);
830 return -1;
831}
832
833static int netdev_open(struct net_device *dev)
834{
835 struct netdev_private *np = netdev_priv(dev);
836 void __iomem *ioaddr = np->base;
c514f285 837 const int irq = np->pci_dev->irq;
acd70c2b 838 unsigned long flags;
839 int i;
840
f210e87b 841 sundance_reset(dev, 0x00ff << 16);
1da177e4 842
c514f285 843 i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
844 if (i)
845 return i;
846
847 if (netif_msg_ifup(np))
848 printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq);
849
850 init_ring(dev);
851
852 iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
853 /* The Tx list pointer is written as packets are queued. */
854
855 /* Initialize other registers. */
856 __set_mac_addr(dev);
857#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
858 iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
859#else
860 iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
861#endif
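	/* Editor's note: dev->mtu + 14 covers the Ethernet header; the VLAN
	 * build allows 4 more bytes for an 802.1Q tag, hence + 18. */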
862 if (dev->mtu > 2047)
863 iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
864
865 /* Configure the PCI bus bursts and FIFO thresholds. */
866
867 if (dev->if_port == 0)
868 dev->if_port = np->default_port;
869
870 spin_lock_init(&np->mcastlock);
871
872 set_rx_mode(dev);
873 iowrite16(0, ioaddr + IntrEnable);
874 iowrite16(0, ioaddr + DownCounter);
875 /* Set the chip to poll every N*320nsec. */
876 iowrite8(100, ioaddr + RxDMAPollPeriod);
877 iowrite8(127, ioaddr + TxDMAPollPeriod);
878 /* Fix DFE-580TX packet drop issue */
44c10138 879 if (np->pci_dev->revision >= 0x14)
880 iowrite8(0x01, ioaddr + DebugCtrl1);
881 netif_start_queue(dev);
882
883 spin_lock_irqsave(&np->lock, flags);
884 reset_tx(dev);
885 spin_unlock_irqrestore(&np->lock, flags);
886
887 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
888
889 /* Disable Wol */
890 iowrite8(ioread8(ioaddr + WakeEvent) | 0x00, ioaddr + WakeEvent);
891 np->wol_enabled = 0;
892
893 if (netif_msg_ifup(np))
894 printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
895 "MAC Control %x, %4.4x %4.4x.\n",
896 dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
897 ioread32(ioaddr + MACCtrl0),
898 ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
899
900 /* Set the timer to check for link beat. */
901 init_timer(&np->timer);
902 np->timer.expires = jiffies + 3*HZ;
903 np->timer.data = (unsigned long)dev;
c061b18d 904 np->timer.function = netdev_timer; /* timer handler */
905 add_timer(&np->timer);
906
907 /* Enable interrupts by setting the interrupt mask. */
908 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
909
910 return 0;
911}
912
913static void check_duplex(struct net_device *dev)
914{
915 struct netdev_private *np = netdev_priv(dev);
916 void __iomem *ioaddr = np->base;
917 int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
918 int negotiated = mii_lpa & np->mii_if.advertising;
919 int duplex;
920
921 /* Force media */
922 if (!np->an_enable || mii_lpa == 0xffff) {
923 if (np->mii_if.full_duplex)
924 iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
925 ioaddr + MACCtrl0);
926 return;
927 }
928
929 /* Autonegotiation */
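	/* Editor's note: 0x0100 is LPA_100FULL; 0x01C0 masks
	 * LPA_100FULL|LPA_100HALF|LPA_10FULL, so the comparison with 0x0040
	 * matches the case where 10 Mbps full duplex is the best common mode. */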
930 duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
931 if (np->mii_if.full_duplex != duplex) {
932 np->mii_if.full_duplex = duplex;
933 if (netif_msg_link(np))
934 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
935 "negotiated capability %4.4x.\n", dev->name,
936 duplex ? "full" : "half", np->phys[0], negotiated);
62660e28 937 iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
938 }
939}
940
941static void netdev_timer(unsigned long data)
942{
943 struct net_device *dev = (struct net_device *)data;
944 struct netdev_private *np = netdev_priv(dev);
945 void __iomem *ioaddr = np->base;
946 int next_tick = 10*HZ;
947
948 if (netif_msg_timer(np)) {
949 printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
950 "Tx %x Rx %x.\n",
951 dev->name, ioread16(ioaddr + IntrEnable),
952 ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
953 }
954 check_duplex(dev);
955 np->timer.expires = jiffies + next_tick;
956 add_timer(&np->timer);
957}
958
959static void tx_timeout(struct net_device *dev)
960{
961 struct netdev_private *np = netdev_priv(dev);
962 void __iomem *ioaddr = np->base;
963 unsigned long flag;
6aa20a22 964
965 netif_stop_queue(dev);
966 tasklet_disable(&np->tx_tasklet);
967 iowrite16(0, ioaddr + IntrEnable);
968 printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
969 "TxFrameId %2.2x,"
970 " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
971 ioread8(ioaddr + TxFrameId));
972
973 {
974 int i;
975 for (i=0; i<TX_RING_SIZE; i++) {
976 printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
977 (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
978 le32_to_cpu(np->tx_ring[i].next_desc),
979 le32_to_cpu(np->tx_ring[i].status),
980 (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
6aa20a22 981 le32_to_cpu(np->tx_ring[i].frag[0].addr),
982 le32_to_cpu(np->tx_ring[i].frag[0].length));
983 }
984 printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
985 ioread32(np->base + TxListPtr),
1da177e4 986 netif_queue_stopped(dev));
6aa20a22 987 printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
988 np->cur_tx, np->cur_tx % TX_RING_SIZE,
989 np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
990 printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
991 printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
992 }
993 spin_lock_irqsave(&np->lock, flag);
994
995 /* Stop and restart the chip's Tx processes . */
996 reset_tx(dev);
997 spin_unlock_irqrestore(&np->lock, flag);
998
999 dev->if_port = 0;
1000
1ae5dc34 1001 dev->trans_start = jiffies; /* prevent tx timeout */
553e2335 1002 dev->stats.tx_errors++;
1003 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1004 netif_wake_queue(dev);
1005 }
1006 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1007 tasklet_enable(&np->tx_tasklet);
1008}
1009
1010
1011/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1012static void init_ring(struct net_device *dev)
1013{
1014 struct netdev_private *np = netdev_priv(dev);
1015 int i;
1016
1017 np->cur_rx = np->cur_tx = 0;
1018 np->dirty_rx = np->dirty_tx = 0;
1019 np->cur_task = 0;
1020
1021 np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
1022
1023 /* Initialize all Rx descriptors. */
1024 for (i = 0; i < RX_RING_SIZE; i++) {
1025 np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
1026 ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
1027 np->rx_ring[i].status = 0;
1028 np->rx_ring[i].frag[0].length = 0;
1029 np->rx_skbuff[i] = NULL;
1030 }
1031
1032 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1033 for (i = 0; i < RX_RING_SIZE; i++) {
1034 struct sk_buff *skb =
1035 netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1036 np->rx_skbuff[i] = skb;
1037 if (skb == NULL)
1038 break;
1039 skb_reserve(skb, 2); /* 16 byte align the IP header. */
1040 np->rx_ring[i].frag[0].addr = cpu_to_le32(
1041 dma_map_single(&np->pci_dev->dev, skb->data,
1042 np->rx_buf_sz, DMA_FROM_DEVICE));
1043 if (dma_mapping_error(&np->pci_dev->dev,
1044 np->rx_ring[i].frag[0].addr)) {
1045 dev_kfree_skb(skb);
1046 np->rx_skbuff[i] = NULL;
1047 break;
1048 }
1049 np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1050 }
1051 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1052
1053 for (i = 0; i < TX_RING_SIZE; i++) {
1054 np->tx_skbuff[i] = NULL;
1055 np->tx_ring[i].status = 0;
1056 }
1057}
1058
1059static void tx_poll (unsigned long data)
1060{
1061 struct net_device *dev = (struct net_device *)data;
1062 struct netdev_private *np = netdev_priv(dev);
1063 unsigned head = np->cur_task % TX_RING_SIZE;
6aa20a22 1064 struct netdev_desc *txdesc =
1da177e4 1065 &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
6aa20a22 1066
1067 /* Chain the next pointer */
1068 for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1069 int entry = np->cur_task % TX_RING_SIZE;
1070 txdesc = &np->tx_ring[entry];
1071 if (np->last_tx) {
1072 np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1073 entry*sizeof(struct netdev_desc));
1074 }
1075 np->last_tx = txdesc;
1076 }
1077 /* Indicate the latest descriptor of tx ring */
1078 txdesc->status |= cpu_to_le32(DescIntrOnTx);
1079
1080 if (ioread32 (np->base + TxListPtr) == 0)
1081 iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1082 np->base + TxListPtr);
1083}
1084
61357325 1085static netdev_tx_t
1086start_tx (struct sk_buff *skb, struct net_device *dev)
1087{
1088 struct netdev_private *np = netdev_priv(dev);
1089 struct netdev_desc *txdesc;
1090 unsigned entry;
1091
1092 /* Calculate the next Tx descriptor entry. */
1093 entry = np->cur_tx % TX_RING_SIZE;
1094 np->tx_skbuff[entry] = skb;
1095 txdesc = &np->tx_ring[entry];
1096
1097 txdesc->next_desc = 0;
1098 txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1099 txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
1100 skb->data, skb->len, DMA_TO_DEVICE));
1101 if (dma_mapping_error(&np->pci_dev->dev,
1102 txdesc->frag[0].addr))
1103 goto drop_frame;
1104 txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1105
1106 /* Increment cur_tx before tasklet_schedule() */
1107 np->cur_tx++;
1108 mb();
1109 /* Schedule a tx_poll() task */
1110 tasklet_schedule(&np->tx_tasklet);
1111
1112 /* On some architectures: explicitly flush cache lines here. */
1113 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
1114 !netif_queue_stopped(dev)) {
1115 /* do nothing */
1116 } else {
1117 netif_stop_queue (dev);
1118 }
1119 if (netif_msg_tx_queued(np)) {
1120 printk (KERN_DEBUG
1121 "%s: Transmit frame #%d queued in slot %d.\n",
1122 dev->name, np->cur_tx, entry);
1123 }
6ed10654 1124 return NETDEV_TX_OK;
1125
1126drop_frame:
1127 dev_kfree_skb(skb);
1128 np->tx_skbuff[entry] = NULL;
1129 dev->stats.tx_dropped++;
1130 return NETDEV_TX_OK;
1131}
1132
1133/* Reset hardware tx and free all of tx buffers */
1134static int
1135reset_tx (struct net_device *dev)
1136{
1137 struct netdev_private *np = netdev_priv(dev);
1138 void __iomem *ioaddr = np->base;
1139 struct sk_buff *skb;
1140 int i;
6aa20a22 1141
1142 /* Reset tx logic, TxListPtr will be cleaned */
1143 iowrite16 (TxDisable, ioaddr + MACCtrl1);
1144 sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
1145
1146 /* free all tx skbuff */
1147 for (i = 0; i < TX_RING_SIZE; i++) {
1148 np->tx_ring[i].next_desc = 0;
1149
1150 skb = np->tx_skbuff[i];
1151 if (skb) {
0c8a745f 1152 dma_unmap_single(&np->pci_dev->dev,
14c9d9b0 1153 le32_to_cpu(np->tx_ring[i].frag[0].addr),
0c8a745f 1154 skb->len, DMA_TO_DEVICE);
a9478e38 1155 dev_kfree_skb_any(skb);
1da177e4 1156 np->tx_skbuff[i] = NULL;
553e2335 1157 dev->stats.tx_dropped++;
1158 }
1159 }
1160 np->cur_tx = np->dirty_tx = 0;
1161 np->cur_task = 0;
2109f89f 1162
bca79eb7 1163 np->last_tx = NULL;
1164 iowrite8(127, ioaddr + TxDMAPollPeriod);
1165
1166 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1167 return 0;
1168}
1169
1170/* The interrupt handler cleans up after the Tx thread,
1171 and schedules Rx thread work. */
7d12e780 1172static irqreturn_t intr_handler(int irq, void *dev_instance)
1173{
1174 struct net_device *dev = (struct net_device *)dev_instance;
1175 struct netdev_private *np = netdev_priv(dev);
1176 void __iomem *ioaddr = np->base;
1177 int hw_frame_id;
1178 int tx_cnt;
1179 int tx_status;
1180 int handled = 0;
e242040d 1181 int i;
1182
1183
1184 do {
1185 int intr_status = ioread16(ioaddr + IntrStatus);
1186 iowrite16(intr_status, ioaddr + IntrStatus);
1187
1188 if (netif_msg_intr(np))
1189 printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1190 dev->name, intr_status);
1191
1192 if (!(intr_status & DEFAULT_INTR))
1193 break;
1194
1195 handled = 1;
1196
1197 if (intr_status & (IntrRxDMADone)) {
1198 iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1199 ioaddr + IntrEnable);
1200 if (np->budget < 0)
1201 np->budget = RX_BUDGET;
1202 tasklet_schedule(&np->rx_tasklet);
1203 }
1204 if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1205 tx_status = ioread16 (ioaddr + TxStatus);
1206 for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1207 if (netif_msg_tx_done(np))
1208 printk
1209 ("%s: Transmit status is %2.2x.\n",
1210 dev->name, tx_status);
1211 if (tx_status & 0x1e) {
1212 if (netif_msg_tx_err(np))
1213 printk("%s: Transmit error status %4.4x.\n",
1214 dev->name, tx_status);
553e2335 1215 dev->stats.tx_errors++;
1da177e4 1216 if (tx_status & 0x10)
553e2335 1217 dev->stats.tx_fifo_errors++;
1da177e4 1218 if (tx_status & 0x08)
553e2335 1219 dev->stats.collisions++;
b71b95ef 1220 if (tx_status & 0x04)
553e2335 1221 dev->stats.tx_fifo_errors++;
1da177e4 1222 if (tx_status & 0x02)
553e2335 1223 dev->stats.tx_window_errors++;
e242040d 1224
1225 /*
1226 ** This reset has been verified on
1227 ** DFE-580TX boards ! phdm@macqel.be.
1228 */
1229 if (tx_status & 0x10) { /* TxUnderrun */
1230 /* Restart Tx FIFO and transmitter */
1231 sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
b71b95ef 1232 /* No need to reset the Tx pointer here */
1da177e4 1233 }
1234 /* Restart the Tx. Need to make sure tx enabled */
1235 i = 10;
1236 do {
1237 iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
1238 if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
1239 break;
1240 mdelay(1);
1241 } while (--i);
1242 }
1243 /* Yup, this is a documentation bug. It cost me *hours*. */
1244 iowrite16 (0, ioaddr + TxStatus);
1245 if (tx_cnt < 0) {
1246 iowrite32(5000, ioaddr + DownCounter);
1247 break;
1248 }
1249 tx_status = ioread16 (ioaddr + TxStatus);
1250 }
1251 hw_frame_id = (tx_status >> 8) & 0xff;
1252 } else {
1253 hw_frame_id = ioread8(ioaddr + TxFrameId);
1254 }
6aa20a22 1255
44c10138 1256 if (np->pci_dev->revision >= 0x14) {
1257 spin_lock(&np->lock);
1258 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1259 int entry = np->dirty_tx % TX_RING_SIZE;
1260 struct sk_buff *skb;
1261 int sw_frame_id;
1262 sw_frame_id = (le32_to_cpu(
1263 np->tx_ring[entry].status) >> 2) & 0xff;
1264 if (sw_frame_id == hw_frame_id &&
1265 !(le32_to_cpu(np->tx_ring[entry].status)
1266 & 0x00010000))
1267 break;
6aa20a22 1268 if (sw_frame_id == (hw_frame_id + 1) %
1269 TX_RING_SIZE)
1270 break;
1271 skb = np->tx_skbuff[entry];
1272 /* Free the original skb. */
0c8a745f 1273 dma_unmap_single(&np->pci_dev->dev,
14c9d9b0 1274 le32_to_cpu(np->tx_ring[entry].frag[0].addr),
0c8a745f 1275 skb->len, DMA_TO_DEVICE);
1276 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1277 np->tx_skbuff[entry] = NULL;
1278 np->tx_ring[entry].frag[0].addr = 0;
1279 np->tx_ring[entry].frag[0].length = 0;
1280 }
1281 spin_unlock(&np->lock);
1282 } else {
1283 spin_lock(&np->lock);
1284 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1285 int entry = np->dirty_tx % TX_RING_SIZE;
1286 struct sk_buff *skb;
6aa20a22 1287 if (!(le32_to_cpu(np->tx_ring[entry].status)
1288 & 0x00010000))
1289 break;
1290 skb = np->tx_skbuff[entry];
1291 /* Free the original skb. */
0c8a745f 1292 dma_unmap_single(&np->pci_dev->dev,
14c9d9b0 1293 le32_to_cpu(np->tx_ring[entry].frag[0].addr),
0c8a745f 1294 skb->len, DMA_TO_DEVICE);
1295 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1296 np->tx_skbuff[entry] = NULL;
1297 np->tx_ring[entry].frag[0].addr = 0;
1298 np->tx_ring[entry].frag[0].length = 0;
1299 }
1300 spin_unlock(&np->lock);
1301 }
6aa20a22 1302
1303 if (netif_queue_stopped(dev) &&
1304 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1305 /* The ring is no longer full, clear busy flag. */
1306 netif_wake_queue (dev);
1307 }
1308 /* Abnormal error summary/uncommon events handlers. */
1309 if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1310 netdev_error(dev, intr_status);
1311 } while (0);
1312 if (netif_msg_intr(np))
1313 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1314 dev->name, ioread16(ioaddr + IntrStatus));
1315 return IRQ_RETVAL(handled);
1316}
1317
1318static void rx_poll(unsigned long data)
1319{
1320 struct net_device *dev = (struct net_device *)data;
1321 struct netdev_private *np = netdev_priv(dev);
1322 int entry = np->cur_rx % RX_RING_SIZE;
1323 int boguscnt = np->budget;
1324 void __iomem *ioaddr = np->base;
1325 int received = 0;
1326
1327 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1328 while (1) {
1329 struct netdev_desc *desc = &(np->rx_ring[entry]);
1330 u32 frame_status = le32_to_cpu(desc->status);
1331 int pkt_len;
1332
1333 if (--boguscnt < 0) {
1334 goto not_done;
1335 }
1336 if (!(frame_status & DescOwn))
1337 break;
1338 pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */
1339 if (netif_msg_rx_status(np))
1340 printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
1341 frame_status);
1342 if (frame_status & 0x001f4000) {
1343 /* There was an error. */
1344 if (netif_msg_rx_err(np))
1345 printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
1346 frame_status);
1347 dev->stats.rx_errors++;
1348 if (frame_status & 0x00100000)
1349 dev->stats.rx_length_errors++;
1350 if (frame_status & 0x00010000)
1351 dev->stats.rx_fifo_errors++;
1352 if (frame_status & 0x00060000)
1353 dev->stats.rx_frame_errors++;
1354 if (frame_status & 0x00080000)
1355 dev->stats.rx_crc_errors++;
1356 if (frame_status & 0x00100000) {
1357 printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1358 " status %8.8x.\n",
1359 dev->name, frame_status);
1360 }
1361 } else {
1362 struct sk_buff *skb;
1363#ifndef final_version
1364 if (netif_msg_rx_status(np))
1365 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
1366 ", bogus_cnt %d.\n",
1367 pkt_len, boguscnt);
1368#endif
1369 /* Check if the packet is long enough to accept without copying
1370 to a minimally-sized skbuff. */
8e95a202 1371 if (pkt_len < rx_copybreak &&
21a4e469 1372 (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
1da177e4 1373 skb_reserve(skb, 2); /* 16 byte align the IP header */
1374 dma_sync_single_for_cpu(&np->pci_dev->dev,
1375 le32_to_cpu(desc->frag[0].addr),
1376 np->rx_buf_sz, DMA_FROM_DEVICE);
8c7b7faa 1377 skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1378 dma_sync_single_for_device(&np->pci_dev->dev,
1379 le32_to_cpu(desc->frag[0].addr),
1380 np->rx_buf_sz, DMA_FROM_DEVICE);
1381 skb_put(skb, pkt_len);
1382 } else {
0c8a745f 1383 dma_unmap_single(&np->pci_dev->dev,
14c9d9b0 1384 le32_to_cpu(desc->frag[0].addr),
0c8a745f 1385 np->rx_buf_sz, DMA_FROM_DEVICE);
1386 skb_put(skb = np->rx_skbuff[entry], pkt_len);
1387 np->rx_skbuff[entry] = NULL;
1388 }
1389 skb->protocol = eth_type_trans(skb, dev);
1390 /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1391 netif_rx(skb);
1392 }
1393 entry = (entry + 1) % RX_RING_SIZE;
1394 received++;
1395 }
1396 np->cur_rx = entry;
1397 refill_rx (dev);
1398 np->budget -= received;
1399 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1400 return;
1401
1402not_done:
1403 np->cur_rx = entry;
1404 refill_rx (dev);
1405 if (!received)
1406 received = 1;
1407 np->budget -= received;
1408 if (np->budget <= 0)
1409 np->budget = RX_BUDGET;
1410 tasklet_schedule(&np->rx_tasklet);
1411}
1412
1413static void refill_rx (struct net_device *dev)
1414{
1415 struct netdev_private *np = netdev_priv(dev);
1416 int entry;
1417 int cnt = 0;
1418
1419 /* Refill the Rx ring buffers. */
1420 for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1421 np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1422 struct sk_buff *skb;
1423 entry = np->dirty_rx % RX_RING_SIZE;
1424 if (np->rx_skbuff[entry] == NULL) {
21a4e469 1425 skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1426 np->rx_skbuff[entry] = skb;
1427 if (skb == NULL)
1428 break; /* Better luck next round. */
1429 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1430 np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1431 dma_map_single(&np->pci_dev->dev, skb->data,
1432 np->rx_buf_sz, DMA_FROM_DEVICE));
1433 if (dma_mapping_error(&np->pci_dev->dev,
1434 np->rx_ring[entry].frag[0].addr)) {
1435 dev_kfree_skb_irq(skb);
1436 np->rx_skbuff[entry] = NULL;
1437 break;
1438 }
1439 }
1440 /* Perhaps we need not reset this field. */
1441 np->rx_ring[entry].frag[0].length =
1442 cpu_to_le32(np->rx_buf_sz | LastFrag);
1443 np->rx_ring[entry].status = 0;
1444 cnt++;
1445 }
1446}
1447static void netdev_error(struct net_device *dev, int intr_status)
1448{
1449 struct netdev_private *np = netdev_priv(dev);
1450 void __iomem *ioaddr = np->base;
1451 u16 mii_ctl, mii_advertise, mii_lpa;
1452 int speed;
1453
1454 if (intr_status & LinkChange) {
1455 if (mdio_wait_link(dev, 10) == 0) {
1456 printk(KERN_INFO "%s: Link up\n", dev->name);
1457 if (np->an_enable) {
1458 mii_advertise = mdio_read(dev, np->phys[0],
1459 MII_ADVERTISE);
1460 mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
1461 mii_advertise &= mii_lpa;
1462 printk(KERN_INFO "%s: Link changed: ",
1463 dev->name);
1464 if (mii_advertise & ADVERTISE_100FULL) {
1465 np->speed = 100;
1466 printk("100Mbps, full duplex\n");
1467 } else if (mii_advertise & ADVERTISE_100HALF) {
1468 np->speed = 100;
1469 printk("100Mbps, half duplex\n");
1470 } else if (mii_advertise & ADVERTISE_10FULL) {
1471 np->speed = 10;
1472 printk("10Mbps, full duplex\n");
1473 } else if (mii_advertise & ADVERTISE_10HALF) {
1474 np->speed = 10;
1475 printk("10Mbps, half duplex\n");
1476 } else
1477 printk("\n");
1da177e4 1478
1479 } else {
1480 mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
1481 speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1482 np->speed = speed;
1483 printk(KERN_INFO "%s: Link changed: %dMbps ,",
1484 dev->name, speed);
1485 printk("%s duplex.\n",
1486 (mii_ctl & BMCR_FULLDPLX) ?
1487 "full" : "half");
1488 }
1489 check_duplex(dev);
1490 if (np->flowctrl && np->mii_if.full_duplex) {
1491 iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1492 ioaddr + MulticastFilter1+2);
1493 iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1494 ioaddr + MACCtrl0);
1495 }
1496 netif_carrier_on(dev);
1da177e4 1497 } else {
1498 printk(KERN_INFO "%s: Link down\n", dev->name);
1499 netif_carrier_off(dev);
1500 }
1501 }
1502 if (intr_status & StatsMax) {
1503 get_stats(dev);
1504 }
1505 if (intr_status & IntrPCIErr) {
1506 printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1507 dev->name, intr_status);
1508 /* We must do a global reset of DMA to continue. */
1509 }
1510}
1511
1512static struct net_device_stats *get_stats(struct net_device *dev)
1513{
1514 struct netdev_private *np = netdev_priv(dev);
1515 void __iomem *ioaddr = np->base;
7b738b55 1516 unsigned long flags;
725a4a46 1517 u8 late_coll, single_coll, mult_coll;
1da177e4 1518
7b738b55 1519 spin_lock_irqsave(&np->statlock, flags);
1da177e4 1520 /* The chip only need report frame silently dropped. */
1521 dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
1522 dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1523 dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
553e2335 1524 dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
1525
1526 mult_coll = ioread8(ioaddr + StatsMultiColl);
1527 np->xstats.tx_multiple_collisions += mult_coll;
1528 single_coll = ioread8(ioaddr + StatsOneColl);
1529 np->xstats.tx_single_collisions += single_coll;
1530 late_coll = ioread8(ioaddr + StatsLateColl);
1531 np->xstats.tx_late_collisions += late_coll;
1532 dev->stats.collisions += mult_coll
1533 + single_coll
1534 + late_coll;
1535
1536 np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
1537 np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer);
1538 np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort);
1539 np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx);
1540 np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx);
1541 np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx);
1542 np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx);
1543
1544 dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1545 dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1546 dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1547 dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1da177e4 1548
1549 spin_unlock_irqrestore(&np->statlock, flags);
1550
553e2335 1551 return &dev->stats;
1552}
1553
1554static void set_rx_mode(struct net_device *dev)
1555{
1556 struct netdev_private *np = netdev_priv(dev);
1557 void __iomem *ioaddr = np->base;
1558 u16 mc_filter[4]; /* Multicast hash filter */
1559 u32 rx_mode;
1560 int i;
1561
1562 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1563 memset(mc_filter, 0xff, sizeof(mc_filter));
1564 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
4cd24eaf 1565 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
8e95a202 1566 (dev->flags & IFF_ALLMULTI)) {
1567 /* Too many to match, or accept all multicasts. */
1568 memset(mc_filter, 0xff, sizeof(mc_filter));
1569 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
4cd24eaf 1570 } else if (!netdev_mc_empty(dev)) {
22bedad3 1571 struct netdev_hw_addr *ha;
1572 int bit;
1573 int index;
1574 int crc;
1575 memset (mc_filter, 0, sizeof (mc_filter));
1576 netdev_for_each_mc_addr(ha, dev) {
1577 crc = ether_crc_le(ETH_ALEN, ha->addr);
1578 for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1579 if (crc & 0x80000000) index |= 1 << bit;
1580 mc_filter[index/16] |= (1 << (index % 16));
1581 }
1582 rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1583 } else {
1584 iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1585 return;
1586 }
1587 if (np->mii_if.full_duplex && np->flowctrl)
1588 mc_filter[3] |= 0x0200;
1589
1590 for (i = 0; i < 4; i++)
1591 iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1592 iowrite8(rx_mode, ioaddr + RxMode);
1593}
1594
1595static int __set_mac_addr(struct net_device *dev)
1596{
1597 struct netdev_private *np = netdev_priv(dev);
1598 u16 addr16;
1599
1600 addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1601 iowrite16(addr16, np->base + StationAddr);
1602 addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1603 iowrite16(addr16, np->base + StationAddr+2);
1604 addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1605 iowrite16(addr16, np->base + StationAddr+4);
1606 return 0;
1607}
1608
1609/* Invoked with rtnl_lock held */
1610static int sundance_set_mac_addr(struct net_device *dev, void *data)
1611{
1612 const struct sockaddr *addr = data;
1613
1614 if (!is_valid_ether_addr(addr->sa_data))
504f9b5a 1615 return -EADDRNOTAVAIL;
1616 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
1617 __set_mac_addr(dev);
1618
1619 return 0;
1620}
1621
static const struct {
	const char name[ETH_GSTRING_LEN];
} sundance_stats[] = {
	{ "tx_multiple_collisions" },
	{ "tx_single_collisions" },
	{ "tx_late_collisions" },
	{ "tx_deferred" },
	{ "tx_deferred_excessive" },
	{ "tx_aborted" },
	{ "tx_bcasts" },
	{ "rx_bcasts" },
	{ "tx_mcasts" },
	{ "rx_mcasts" },
};

static int check_if_running(struct net_device *dev)
{
	if (!netif_running(dev))
		return -EINVAL;
	return 0;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	mii_ethtool_gset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	return 0;
}

static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;
	spin_lock_irq(&np->lock);
	res = mii_ethtool_sset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	return res;
}

static int nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii_if);
}

static u32 get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii_if);
}

static u32 get_msglevel(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return np->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct netdev_private *np = netdev_priv(dev);
	np->msg_enable = val;
}

static void get_strings(struct net_device *dev, u32 stringset,
			u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, sundance_stats, sizeof(sundance_stats));
}

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(sundance_stats);
	default:
		return -EOPNOTSUPP;
	}
}

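/* Refresh the hardware counters via get_stats() and report them in the
   same order as the names in sundance_stats[]. */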
static void get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *data)
{
	struct netdev_private *np = netdev_priv(dev);
	int i = 0;

	get_stats(dev);
	data[i++] = np->xstats.tx_multiple_collisions;
	data[i++] = np->xstats.tx_single_collisions;
	data[i++] = np->xstats.tx_late_collisions;
	data[i++] = np->xstats.tx_deferred;
	data[i++] = np->xstats.tx_deferred_excessive;
	data[i++] = np->xstats.tx_aborted;
	data[i++] = np->xstats.tx_bcasts;
	data[i++] = np->xstats.rx_bcasts;
	data[i++] = np->xstats.tx_mcasts;
	data[i++] = np->xstats.rx_mcasts;
}

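/* Wake-on-LAN support is only compiled in together with power management;
   without CONFIG_PM the ethtool WoL hooks below remain unimplemented. */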
#ifdef CONFIG_PM

static void sundance_get_wol(struct net_device *dev,
		struct ethtool_wolinfo *wol)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u8 wol_bits;

	wol->wolopts = 0;

	wol->supported = (WAKE_PHY | WAKE_MAGIC);
	if (!np->wol_enabled)
		return;

	wol_bits = ioread8(ioaddr + WakeEvent);
	if (wol_bits & MagicPktEnable)
		wol->wolopts |= WAKE_MAGIC;
	if (wol_bits & LinkEventEnable)
		wol->wolopts |= WAKE_PHY;
}

static int sundance_set_wol(struct net_device *dev,
		struct ethtool_wolinfo *wol)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u8 wol_bits;

	if (!device_can_wakeup(&np->pci_dev->dev))
		return -EOPNOTSUPP;

	np->wol_enabled = !!(wol->wolopts);
	wol_bits = ioread8(ioaddr + WakeEvent);
	wol_bits &= ~(WakePktEnable | MagicPktEnable |
			LinkEventEnable | WolEnable);

	if (np->wol_enabled) {
		if (wol->wolopts & WAKE_MAGIC)
			wol_bits |= (MagicPktEnable | WolEnable);
		if (wol->wolopts & WAKE_PHY)
			wol_bits |= (LinkEventEnable | WolEnable);
	}
	iowrite8(wol_bits, ioaddr + WakeEvent);

	device_set_wakeup_enable(&np->pci_dev->dev, np->wol_enabled);

	return 0;
}
#else
#define sundance_get_wol NULL
#define sundance_set_wol NULL
#endif /* CONFIG_PM */

static const struct ethtool_ops ethtool_ops = {
	.begin = check_if_running,
	.get_drvinfo = get_drvinfo,
	.get_settings = get_settings,
	.set_settings = set_settings,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_wol = sundance_get_wol,
	.set_wol = sundance_set_wol,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_strings = get_strings,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_ethtool_stats,
};

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&np->lock);

	return rc;
}

static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int i;

	/* Wait for, then kill, the Rx and Tx tasklets */
	tasklet_kill(&np->rx_tasklet);
	tasklet_kill(&np->tx_tasklet);
	np->cur_tx = 0;
	np->dirty_tx = 0;
	np->cur_task = 0;
	np->last_tx = NULL;

	netif_stop_queue(dev);

	if (netif_msg_ifdown(np)) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
			   "Rx %4.4x Int %2.2x.\n",
			   dev->name, ioread8(ioaddr + TxStatus),
			   ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite16(0x0000, ioaddr + IntrEnable);

	/* Disable Rx and Tx DMA so the resources can be released safely. */
	iowrite32(0x500, ioaddr + DMACtrl);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);

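	/* Poll for up to two seconds for the Tx/Rx DMA in-progress bits in
	   DMACtrl to clear before issuing the global reset below. */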
	for (i = 2000; i > 0; i--) {
		if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
			break;
		mdelay(1);
	}

	iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
			ioaddr + ASIC_HI_WORD(ASICCtrl));

	for (i = 2000; i > 0; i--) {
		if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
			break;
		mdelay(1);
	}

#ifdef __i386__
	if (netif_msg_hw(np)) {
		printk(KERN_DEBUG " Tx ring at %8.8x:\n",
			   (int)(np->tx_ring_dma));
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
				   i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
				   np->tx_ring[i].frag[0].length);
		printk(KERN_DEBUG " Rx ring %8.8x:\n",
			   (int)(np->rx_ring_dma));
		for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
				   i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
				   np->rx_ring[i].frag[0].length);
		}
	}
#endif /* __i386__ debugging only */

	free_irq(np->pci_dev->irq, dev);

	del_timer_sync(&np->timer);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		skb = np->rx_skbuff[i];
		if (skb) {
			dma_unmap_single(&np->pci_dev->dev,
				le32_to_cpu(np->rx_ring[i].frag[0].addr),
				np->rx_buf_sz, DMA_FROM_DEVICE);
			dev_kfree_skb(skb);
			np->rx_skbuff[i] = NULL;
		}
		np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].next_desc = 0;
		skb = np->tx_skbuff[i];
		if (skb) {
			dma_unmap_single(&np->pci_dev->dev,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				skb->len, DMA_TO_DEVICE);
			dev_kfree_skb(skb);
			np->tx_skbuff[i] = NULL;
		}
	}

	return 0;
}

static void sundance_remove1(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);
		unregister_netdev(dev);
		dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
			np->rx_ring, np->rx_ring_dma);
		dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
			np->tx_ring, np->tx_ring_dma);
		pci_iounmap(pdev, np->base);
		pci_release_regions(pdev);
		free_netdev(dev);
		pci_set_drvdata(pdev, NULL);
	}
}

#ifdef CONFIG_PM

static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;

	if (!netif_running(dev))
		return 0;

	netdev_close(dev);
	netif_device_detach(dev);

	pci_save_state(pci_dev);
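	/* If Wake-on-LAN is armed, leave the receiver running in a
	   broadcast/station-address-only mode so that wake-up frames can
	   still reach the chip while it is powered down. */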
	if (np->wol_enabled) {
		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
		iowrite16(RxEnable, ioaddr + MACCtrl1);
	}
	pci_enable_wake(pci_dev, pci_choose_state(pci_dev, state),
			np->wol_enabled);
	pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));

	return 0;
}

static int sundance_resume(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	int err = 0;

	if (!netif_running(dev))
		return 0;

	pci_set_power_state(pci_dev, PCI_D0);
	pci_restore_state(pci_dev);
	pci_enable_wake(pci_dev, PCI_D0, 0);

	err = netdev_open(dev);
	if (err) {
		printk(KERN_ERR "%s: Can't resume interface!\n",
			dev->name);
		goto out;
	}

	netif_device_attach(dev);

out:
	return err;
}

#endif /* CONFIG_PM */

static struct pci_driver sundance_driver = {
	.name = DRV_NAME,
	.id_table = sundance_pci_tbl,
	.probe = sundance_probe1,
	.remove = sundance_remove1,
#ifdef CONFIG_PM
	.suspend = sundance_suspend,
	.resume = sundance_resume,
#endif /* CONFIG_PM */
};

static int __init sundance_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif
	return pci_register_driver(&sundance_driver);
}

static void __exit sundance_exit(void)
{
	pci_unregister_driver(&sundance_driver);
}

module_init(sundance_init);
module_exit(sundance_exit);