Magic number prefix consistency change to Documentation/magic-number.txt
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / net / epic100.c
CommitLineData
1da177e4
LT
1/* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
2/*
3 Written/copyright 1997-2001 by Donald Becker.
4
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
11
12 This driver is for the SMC83c170/175 "EPIC" series, as used on the
13 SMC EtherPower II 9432 PCI adapter, and several CardBus cards.
14
15 The author may be reached as becker@scyld.com, or C/O
16 Scyld Computing Corporation
17 410 Severn Ave., Suite 210
18 Annapolis MD 21403
19
20 Information and updates available at
21 http://www.scyld.com/network/epic100.html
36e1e847 22 [this link no longer provides anything useful -jgarzik]
1da177e4
LT
23
24 ---------------------------------------------------------------------
f3b197ac 25
1da177e4
LT
26*/
27
28#define DRV_NAME "epic100"
d5b20697
AG
29#define DRV_VERSION "2.1"
30#define DRV_RELDATE "Sept 11, 2006"
1da177e4
LT
31
32/* The user-configurable values.
33 These may be modified when a driver module is loaded.*/
34
35static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
36
37/* Used to pass the full-duplex flag, etc. */
38#define MAX_UNITS 8 /* More are supported, limit only on options */
39static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
40static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
41
42/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
43 Setting to > 1518 effectively disables this feature. */
44static int rx_copybreak;
45
46/* Operational parameters that are set at compile time. */
47
48/* Keep the ring sizes a power of two for operational efficiency.
49 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
50 Making the Tx ring too large decreases the effectiveness of channel
51 bonding and packet priority.
52 There are no ill effects from too-large receive rings. */
53#define TX_RING_SIZE 256
54#define TX_QUEUE_LEN 240 /* Limit ring entries actually used. */
55#define RX_RING_SIZE 256
56#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct epic_tx_desc)
57#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct epic_rx_desc)
58
59/* Operational parameters that usually are not changed. */
60/* Time in jiffies before concluding the transmitter is hung. */
61#define TX_TIMEOUT (2*HZ)
62
63#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
64
65/* Bytes transferred to chip before transmission starts. */
66/* Initial threshold, increased on underflow, rounded down to 4 byte units. */
67#define TX_FIFO_THRESH 256
68#define RX_FIFO_THRESH 1 /* 0-3, 0==32, 64,96, or 3==128 bytes */
69
1da177e4
LT
70#include <linux/module.h>
71#include <linux/kernel.h>
72#include <linux/string.h>
73#include <linux/timer.h>
74#include <linux/errno.h>
75#include <linux/ioport.h>
76#include <linux/slab.h>
77#include <linux/interrupt.h>
78#include <linux/pci.h>
79#include <linux/delay.h>
80#include <linux/netdevice.h>
81#include <linux/etherdevice.h>
82#include <linux/skbuff.h>
83#include <linux/init.h>
84#include <linux/spinlock.h>
85#include <linux/ethtool.h>
86#include <linux/mii.h>
87#include <linux/crc32.h>
88#include <linux/bitops.h>
89#include <asm/io.h>
90#include <asm/uaccess.h>
91
92/* These identify the driver base version and may not be removed. */
93static char version[] __devinitdata =
94DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n";
95static char version2[] __devinitdata =
96" http://www.scyld.com/network/epic100.html\n";
97static char version3[] __devinitdata =
98" (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
99
100MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
101MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
102MODULE_LICENSE("GPL");
103
104module_param(debug, int, 0);
105module_param(rx_copybreak, int, 0);
106module_param_array(options, int, NULL, 0);
107module_param_array(full_duplex, int, NULL, 0);
108MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)");
109MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
110MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
111MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");
112
113/*
114 Theory of Operation
115
116I. Board Compatibility
117
118This device driver is designed for the SMC "EPIC/100", the SMC
119single-chip Ethernet controllers for PCI. This chip is used on
120the SMC EtherPower II boards.
121
122II. Board-specific settings
123
124PCI bus devices are configured by the system at boot time, so no jumpers
125need to be set on the board. The system BIOS will assign the
126PCI INTA signal to a (preferably otherwise unused) system IRQ line.
127Note: Kernel versions earlier than 1.3.73 do not support shared PCI
128interrupt lines.
129
130III. Driver operation
131
132IIIa. Ring buffers
133
134IVb. References
135
136http://www.smsc.com/main/datasheets/83c171.pdf
137http://www.smsc.com/main/datasheets/83c175.pdf
138http://scyld.com/expert/NWay.html
139http://www.national.com/pf/DP/DP83840A.html
140
141IVc. Errata
142
143*/
144
145
1da177e4
LT
146enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };
147
148#define EPIC_TOTAL_SIZE 0x100
149#define USE_IO_OPS 1
1da177e4
LT
150
151typedef enum {
152 SMSC_83C170_0,
153 SMSC_83C170,
154 SMSC_83C175,
155} chip_t;
156
157
158struct epic_chip_info {
159 const char *name;
1da177e4
LT
160 int drv_flags; /* Driver use, intended as capability flags. */
161};
162
163
164/* indexed by chip_t */
f71e1309 165static const struct epic_chip_info pci_id_tbl[] = {
36e1e847
JG
166 { "SMSC EPIC/100 83c170", TYPE2_INTR | NO_MII | MII_PWRDWN },
167 { "SMSC EPIC/100 83c170", TYPE2_INTR },
168 { "SMSC EPIC/C 83c175", TYPE2_INTR | MII_PWRDWN },
1da177e4
LT
169};
170
171
172static struct pci_device_id epic_pci_tbl[] = {
173 { 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
174 { 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
175 { 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
176 PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 },
177 { 0,}
178};
179MODULE_DEVICE_TABLE (pci, epic_pci_tbl);
180
f3b197ac 181
1da177e4
LT
182#ifndef USE_IO_OPS
183#undef inb
184#undef inw
185#undef inl
186#undef outb
187#undef outw
188#undef outl
189#define inb readb
190#define inw readw
191#define inl readl
192#define outb writeb
193#define outw writew
194#define outl writel
195#endif
196
197/* Offsets to registers, using the (ugh) SMC names. */
198enum epic_registers {
199 COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
200 PCIBurstCnt=0x18,
201 TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28, /* Rx error counters. */
202 MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
203 LAN0=64, /* MAC address. */
204 MC0=80, /* Multicast filter table. */
205 RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
206 PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
207};
208
209/* Interrupt register bits, using my own meaningful names. */
210enum IntrStatus {
211 TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
212 PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
213 RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
214 TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
215 RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
216};
217enum CommandBits {
218 StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
219 StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
220};
221
222#define EpicRemoved 0xffffffff /* Chip failed or removed (CardBus) */
223
224#define EpicNapiEvent (TxEmpty | TxDone | \
225 RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull)
226#define EpicNormalEvent (0x0000ffff & ~EpicNapiEvent)
227
f71e1309 228static const u16 media2miictl[16] = {
1da177e4
LT
229 0, 0x0C00, 0x0C00, 0x2000, 0x0100, 0x2100, 0, 0,
230 0, 0, 0, 0, 0, 0, 0, 0 };
231
232/* The EPIC100 Rx and Tx buffer descriptors. */
233
234struct epic_tx_desc {
235 u32 txstatus;
236 u32 bufaddr;
237 u32 buflength;
238 u32 next;
239};
240
241struct epic_rx_desc {
242 u32 rxstatus;
243 u32 bufaddr;
244 u32 buflength;
245 u32 next;
246};
247
248enum desc_status_bits {
249 DescOwn=0x8000,
250};
251
#define PRIV_ALIGN	15 	/* Required alignment mask */
/* Per-device driver state, hung off dev->priv. */
struct epic_private {
	/* Descriptor rings, allocated coherently in epic_init_one(). */
	struct epic_rx_desc *rx_ring;
	struct epic_tx_desc *tx_ring;
	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];

	dma_addr_t tx_ring_dma;		/* Bus address of tx_ring. */
	dma_addr_t rx_ring_dma;		/* Bus address of rx_ring. */

	/* Ring pointers. */
	spinlock_t lock;				/* Group with Tx control cache line. */
	spinlock_t napi_lock;			/* Guards reschedule_in_poll. */
	unsigned int reschedule_in_poll;	/* Events seen while already polling. */
	unsigned int cur_tx, dirty_tx;		/* Tx producer / consumer indices. */

	unsigned int cur_rx, dirty_rx;		/* Rx producer / consumer indices. */
	u32 irq_mask;				/* Interrupt sources enabled in INTMASK. */
	unsigned int rx_buf_sz;				/* Based on MTU+slack. */

	struct pci_dev *pci_dev;			/* PCI bus location. */
	int chip_id, chip_flags;		/* Index and flags from pci_id_tbl[]. */

	struct net_device_stats stats;
	struct timer_list timer;			/* Media selection timer. */
	int tx_threshold;			/* Tx start threshold, bumped on underrun. */
	unsigned char mc_filter[8];		/* Multicast hash filter shadow. */
	signed char phys[4];				/* MII device addresses. */
	u16 advertising;					/* NWay media advertisement */
	int mii_phy_cnt;			/* Number of MII PHYs detected. */
	struct mii_if_info mii;
	unsigned int tx_full:1;				/* The Tx queue is full. */
	unsigned int default_port:4;		/* Last dev->if_port value. */
};
288
289static int epic_open(struct net_device *dev);
290static int read_eeprom(long ioaddr, int location);
291static int mdio_read(struct net_device *dev, int phy_id, int location);
292static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
293static void epic_restart(struct net_device *dev);
294static void epic_timer(unsigned long data);
295static void epic_tx_timeout(struct net_device *dev);
296static void epic_init_ring(struct net_device *dev);
297static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev);
298static int epic_rx(struct net_device *dev, int budget);
299static int epic_poll(struct net_device *dev, int *budget);
7d12e780 300static irqreturn_t epic_interrupt(int irq, void *dev_instance);
1da177e4 301static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
7282d491 302static const struct ethtool_ops netdev_ethtool_ops;
1da177e4
LT
303static int epic_close(struct net_device *dev);
304static struct net_device_stats *epic_get_stats(struct net_device *dev);
305static void set_rx_mode(struct net_device *dev);
306
f3b197ac 307
1da177e4
LT
308
/* PCI probe: enable the device, map its registers, allocate descriptor
   rings, probe the MII bus, and register the net_device.  Cleanup on
   failure unwinds in reverse order via goto labels. */
static int __devinit epic_init_one (struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int card_idx = -1;
	long ioaddr;
	int chip_idx = (int) ent->driver_data;
	int irq;
	struct net_device *dev;
	struct epic_private *ep;
	int i, ret, option = 0, duplex = 0;
	void *ring_space;
	dma_addr_t ring_dma;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
			version, version2, version3);
#endif

	card_idx++;

	ret = pci_enable_device(pdev);
	if (ret)
		goto out;
	irq = pdev->irq;

	/* BAR 0 must cover the full register window. */
	if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) {
		dev_err(&pdev->dev, "no PCI region space\n");
		ret = -ENODEV;
		goto err_out_disable;
	}

	pci_set_master(pdev);

	ret = pci_request_regions(pdev, DRV_NAME);
	if (ret < 0)
		goto err_out_disable;

	ret = -ENOMEM;

	dev = alloc_etherdev(sizeof (*ep));
	if (!dev) {
		dev_err(&pdev->dev, "no memory for eth device\n");
		goto err_out_free_res;
	}
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

#ifdef USE_IO_OPS
	ioaddr = pci_resource_start (pdev, 0);	/* I/O-port BAR */
#else
	ioaddr = pci_resource_start (pdev, 1);	/* memory-mapped BAR */
	ioaddr = (long) ioremap (ioaddr, pci_resource_len (pdev, 1));
	if (!ioaddr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		goto err_out_free_netdev;
	}
#endif

	pci_set_drvdata(pdev, dev);
	ep = dev->priv;
	ep->mii.dev = dev;
	ep->mii.mdio_read = mdio_read;
	ep->mii.mdio_write = mdio_write;
	ep->mii.phy_id_mask = 0x1f;
	ep->mii.reg_num_mask = 0x1f;

	/* Coherent DMA memory for the Tx and Rx descriptor rings. */
	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_iounmap;
	ep->tx_ring = (struct epic_tx_desc *)ring_space;
	ep->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	ep->rx_ring = (struct epic_rx_desc *)ring_space;
	ep->rx_ring_dma = ring_dma;

	/* Media options: dev->mem_start (boot-time) overrides the
	   per-card module parameters. */
	if (dev->mem_start) {
		option = dev->mem_start;
		duplex = (dev->mem_start & 16) ? 1 : 0;
	} else if (card_idx >= 0  &&  card_idx < MAX_UNITS) {
		if (options[card_idx] >= 0)
			option = options[card_idx];
		if (full_duplex[card_idx] >= 0)
			duplex = full_duplex[card_idx];
	}

	dev->base_addr = ioaddr;
	dev->irq = irq;

	spin_lock_init(&ep->lock);
	spin_lock_init(&ep->napi_lock);
	ep->reschedule_in_poll = 0;

	/* Bring the chip out of low-power mode. */
	outl(0x4200, ioaddr + GENCTL);
	/* Magic?!  If we don't set this bit the MII interface won't work. */
	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		outl(0x0008, ioaddr + TEST1);

	/* Turn on the MII transceiver. */
	outl(0x12, ioaddr + MIICfg);
	if (chip_idx == 1)
		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
	outl(0x0200, ioaddr + GENCTL);

	/* Note: the '175 does not have a serial EEPROM. */
	/* NOTE(review): inw() already returns host order, so the
	   le16_to_cpu() here looks like a double swap on big-endian
	   machines -- confirm against the register layout. */
	for (i = 0; i < 3; i++)
		((u16 *)dev->dev_addr)[i] = le16_to_cpu(inw(ioaddr + LAN0 + i*4));

	if (debug > 2) {
		dev_printk(KERN_DEBUG, &pdev->dev, "EEPROM contents:\n");
		for (i = 0; i < 64; i++)
			printk(" %4.4x%s", read_eeprom(ioaddr, i),
				   i % 16 == 15 ? "\n" : "");
	}

	ep->pci_dev = pdev;
	ep->chip_id = chip_idx;
	ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
	ep->irq_mask =
		(ep->chip_flags & TYPE2_INTR ?  PCIBusErr175 : PCIBusErr170)
		 | CntFull | TxUnderrun | EpicNapiEvent;

	/* Find the connected MII xcvrs.
	   Doing this in open() would allow detecting external xcvrs later, but
	   takes much time and no cards have external MII. */
	{
		int phy, phy_idx = 0;
		for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
			int mii_status = mdio_read(dev, phy, MII_BMSR);
			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
				ep->phys[phy_idx++] = phy;
				dev_info(&pdev->dev,
					"MII transceiver #%d control "
					"%4.4x status %4.4x.\n",
					phy, mdio_read(dev, phy, 0), mii_status);
			}
		}
		ep->mii_phy_cnt = phy_idx;
		if (phy_idx != 0) {
			phy = ep->phys[0];
			ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE);
			dev_info(&pdev->dev,
				"Autonegotiation advertising %4.4x link "
				   "partner %4.4x.\n",
				   ep->mii.advertising, mdio_read(dev, phy, 5));
		} else if ( ! (ep->chip_flags & NO_MII)) {
			dev_warn(&pdev->dev,
				"***WARNING***: No MII transceiver found!\n");
			/* Use the known PHY address of the EPII. */
			ep->phys[0] = 3;
		}
		ep->mii.phy_id = ep->phys[0];
	}

	/* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
	if (ep->chip_flags & MII_PWRDWN)
		outl(inl(ioaddr + NVCTL) & ~0x483C, ioaddr + NVCTL);
	outl(0x0008, ioaddr + GENCTL);

	/* The lower four bits are the media type. */
	if (duplex) {
		ep->mii.force_media = ep->mii.full_duplex = 1;
		dev_info(&pdev->dev, "Forced full duplex requested.\n");
	}
	dev->if_port = ep->default_port = option;

	/* The Epic-specific entries in the device structure. */
	dev->open = &epic_open;
	dev->hard_start_xmit = &epic_start_xmit;
	dev->stop = &epic_close;
	dev->get_stats = &epic_get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &netdev_ioctl;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->tx_timeout = &epic_tx_timeout;
	dev->poll = epic_poll;
	dev->weight = 64;

	ret = register_netdev(dev);
	if (ret < 0)
		goto err_out_unmap_rx;

	printk(KERN_INFO "%s: %s at %#lx, IRQ %d, ",
		   dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq);
	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x.\n", dev->dev_addr[i]);

out:
	return ret;

/* Error unwinding, strictly in reverse order of acquisition. */
err_out_unmap_rx:
	pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
err_out_unmap_tx:
	pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
err_out_iounmap:
#ifndef USE_IO_OPS
	iounmap(ioaddr);
err_out_free_netdev:
#endif
	free_netdev(dev);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
	goto out;
}
f3b197ac 524
1da177e4
LT
525/* Serial EEPROM section. */
526
527/* EEPROM_Ctrl bits. */
528#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
529#define EE_CS 0x02 /* EEPROM chip select. */
530#define EE_DATA_WRITE 0x08 /* EEPROM chip data in. */
531#define EE_WRITE_0 0x01
532#define EE_WRITE_1 0x09
533#define EE_DATA_READ 0x10 /* EEPROM chip data out. */
534#define EE_ENB (0x0001 | EE_CS)
535
536/* Delay between EEPROM clock transitions.
537 This serves to flush the operation to the PCI bus.
538 */
539
540#define eeprom_delay() inl(ee_addr)
541
542/* The EEPROM commands include the alway-set leading bit. */
543#define EE_WRITE_CMD (5 << 6)
544#define EE_READ64_CMD (6 << 6)
545#define EE_READ256_CMD (6 << 8)
546#define EE_ERASE_CMD (7 << 6)
547
/* Mask all interrupt sources.  Does not flush the posted write; callers
   that need that follow up with __epic_pci_commit(). */
static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
{
	long ioaddr = dev->base_addr;

	outl(0x00000000, ioaddr + INTMASK);
}
554
/* Flush posted PCI writes by reading back a register.  With I/O-port
   accesses (USE_IO_OPS) writes are not posted, so nothing is needed. */
static inline void __epic_pci_commit(long ioaddr)
{
#ifndef USE_IO_OPS
	inl(ioaddr + INTMASK);
#endif
}
561
/* Mask only the NAPI-handled event sources, leaving error/status
   interrupts enabled; flushes the write so the mask takes effect. */
static inline void epic_napi_irq_off(struct net_device *dev,
				     struct epic_private *ep)
{
	long ioaddr = dev->base_addr;

	outl(ep->irq_mask & ~EpicNapiEvent, ioaddr + INTMASK);
	__epic_pci_commit(ioaddr);
}
570
/* Re-enable the NAPI event sources after polling completes. */
static inline void epic_napi_irq_on(struct net_device *dev,
				    struct epic_private *ep)
{
	long ioaddr = dev->base_addr;

	/* No need to commit possible posted write */
	outl(ep->irq_mask | EpicNapiEvent, ioaddr + INTMASK);
}
579
580static int __devinit read_eeprom(long ioaddr, int location)
581{
582 int i;
583 int retval = 0;
584 long ee_addr = ioaddr + EECTL;
585 int read_cmd = location |
586 (inl(ee_addr) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);
587
588 outl(EE_ENB & ~EE_CS, ee_addr);
589 outl(EE_ENB, ee_addr);
590
591 /* Shift the read command bits out. */
592 for (i = 12; i >= 0; i--) {
593 short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
594 outl(EE_ENB | dataval, ee_addr);
595 eeprom_delay();
596 outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
597 eeprom_delay();
598 }
599 outl(EE_ENB, ee_addr);
600
601 for (i = 16; i > 0; i--) {
602 outl(EE_ENB | EE_SHIFT_CLK, ee_addr);
603 eeprom_delay();
604 retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
605 outl(EE_ENB, ee_addr);
606 eeprom_delay();
607 }
608
609 /* Terminate the EEPROM access. */
610 outl(EE_ENB & ~EE_CS, ee_addr);
611 return retval;
612}
613
#define MII_READOP	1
#define MII_WRITEOP	2
/* Read an MII management register; returns 0xffff on timeout.  The
   retry inside the poll loop works around a chip read-failure bug for
   low registers of PHY 1. */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	long ioaddr = dev->base_addr;
	int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
	int i;

	outl(read_cmd, ioaddr + MIICtrl);
	/* Typical operation takes 25 loops. */
	for (i = 400; i > 0; i--) {
		barrier();
		if ((inl(ioaddr + MIICtrl) & MII_READOP) == 0) {
			/* Work around read failure bug. */
			if (phy_id == 1 && location < 6
			    && inw(ioaddr + MIIData) == 0xffff) {
				outl(read_cmd, ioaddr + MIICtrl);
				continue;
			}
			return inw(ioaddr + MIIData);
		}
	}
	return 0xffff;	/* timed out -- caller treats this as "no PHY" */
}
638
639static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
640{
641 long ioaddr = dev->base_addr;
642 int i;
643
644 outw(value, ioaddr + MIIData);
645 outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl);
f3b197ac 646 for (i = 10000; i > 0; i--) {
1da177e4
LT
647 barrier();
648 if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0)
649 break;
650 }
651 return;
652}
653
f3b197ac 654
1da177e4
LT
/* net_device open: reset the chip, grab the IRQ, build the rings,
   program media/duplex, start Rx, and arm the media-watch timer.
   The register write order below is deliberate -- do not reorder. */
static int epic_open(struct net_device *dev)
{
	struct epic_private *ep = dev->priv;
	long ioaddr = dev->base_addr;
	int i;
	int retval;

	/* Soft reset the chip. */
	outl(0x4001, ioaddr + GENCTL);

	if ((retval = request_irq(dev->irq, &epic_interrupt, IRQF_SHARED, dev->name, dev)))
		return retval;

	epic_init_ring(dev);

	outl(0x4000, ioaddr + GENCTL);
	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		outl(0x0008, ioaddr + TEST1);

	/* Pull the chip out of low-power mode, enable interrupts, and set for
	   PCI read multiple.  The MIIcfg setting and strange write order are
	   required by the details of which bits are reset and the transceiver
	   wiring on the Ositech CardBus card.
	*/
#if 0
	outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
#endif
	if (ep->chip_flags & MII_PWRDWN)
		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);

	/* The two variants differ only in the descriptor byte-order bit. */
#if defined(__powerpc__) || defined(__sparc__)		/* Big endian */
	outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
	inl(ioaddr + GENCTL);
	outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
#else
	outl(0x4412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
	inl(ioaddr + GENCTL);
	outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
#endif

	udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */

	/* Load the station address into the LAN0 registers. */
	for (i = 0; i < 3; i++)
		outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);

	ep->tx_threshold = TX_FIFO_THRESH;
	outl(ep->tx_threshold, ioaddr + TxThresh);

	/* Forced media: program the PHY directly.  Otherwise read the
	   link partner and pick duplex, restarting autonegotiation if the
	   partner never responded. */
	if (media2miictl[dev->if_port & 15]) {
		if (ep->mii_phy_cnt)
			mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
		if (dev->if_port == 1) {
			if (debug > 1)
				printk(KERN_INFO "%s: Using the 10base2 transceiver, MII "
					   "status %4.4x.\n",
					   dev->name, mdio_read(dev, ep->phys[0], MII_BMSR));
		}
	} else {
		int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
		if (mii_lpa != 0xffff) {
			if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL)
				ep->mii.full_duplex = 1;
			else if (! (mii_lpa & LPA_LPACK))
				mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
			if (debug > 1)
				printk(KERN_INFO "%s: Setting %s-duplex based on MII xcvr %d"
					   " register read of %4.4x.\n", dev->name,
					   ep->mii.full_duplex ? "full" : "half",
					   ep->phys[0], mii_lpa);
		}
	}

	outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
	outl(ep->rx_ring_dma, ioaddr + PRxCDAR);
	outl(ep->tx_ring_dma, ioaddr + PTxCDAR);

	/* Start the chip's Rx process. */
	set_rx_mode(dev);
	outl(StartRx | RxQueued, ioaddr + COMMAND);

	netif_start_queue(dev);

	/* Enable interrupts by setting the interrupt mask. */
	outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
		 | CntFull | TxUnderrun
		 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);

	if (debug > 1)
		printk(KERN_DEBUG "%s: epic_open() ioaddr %lx IRQ %d status %4.4x "
			   "%s-duplex.\n",
			   dev->name, ioaddr, dev->irq, (int)inl(ioaddr + GENCTL),
			   ep->mii.full_duplex ? "full" : "half");

	/* Set the timer to switch to check for link beat and perhaps switch
	   to an alternate media type. */
	init_timer(&ep->timer);
	ep->timer.expires = jiffies + 3*HZ;
	ep->timer.data = (unsigned long)dev;
	ep->timer.function = &epic_timer;			/* timer handler */
	add_timer(&ep->timer);

	return 0;
}
759
/* Reset the chip to recover from a PCI transaction error.
   This may occur at interrupt time. */
static void epic_pause(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct epic_private *ep = dev->priv;

	netif_stop_queue (dev);

	/* Disable interrupts by clearing the interrupt mask. */
	outl(0x00000000, ioaddr + INTMASK);
	/* Stop the chip's Tx and Rx DMA processes. */
	outw(StopRx | StopTxDMA | StopRxDMA, ioaddr + COMMAND);

	/* Update the error counts.  An all-ones readback means the chip
	   is gone (e.g. CardBus eject), so skip the counters then. */
	if (inw(ioaddr + COMMAND) != 0xffff) {
		ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
		ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
		ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
	}

	/* Remove the packets on the Rx queue. */
	epic_rx(dev, RX_RING_SIZE);
}
784
/* Re-initialize the chip after a reset without discarding ring state:
   the descriptor pointers are reloaded at the positions where the
   rings left off. */
static void epic_restart(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct epic_private *ep = dev->priv;
	int i;

	/* Soft reset the chip. */
	outl(0x4001, ioaddr + GENCTL);

	printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
		   dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
	udelay(1);

	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		outl(0x0008, ioaddr + TEST1);

#if defined(__powerpc__) || defined(__sparc__)		/* Big endian */
	outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
#else
	outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
#endif
	outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
	if (ep->chip_flags & MII_PWRDWN)
		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);

	/* Reload the station address. */
	for (i = 0; i < 3; i++)
		outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);

	ep->tx_threshold = TX_FIFO_THRESH;
	outl(ep->tx_threshold, ioaddr + TxThresh);
	outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
	/* Point the chip at the descriptors it still owns, preserving the
	   pre-reset ring positions. */
	outl(ep->rx_ring_dma + (ep->cur_rx%RX_RING_SIZE)*
		sizeof(struct epic_rx_desc), ioaddr + PRxCDAR);
	outl(ep->tx_ring_dma + (ep->dirty_tx%TX_RING_SIZE)*
		 sizeof(struct epic_tx_desc), ioaddr + PTxCDAR);

	/* Start the chip's Rx process. */
	set_rx_mode(dev);
	outl(StartRx | RxQueued, ioaddr + COMMAND);

	/* Enable interrupts by setting the interrupt mask. */
	outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
		 | CntFull | TxUnderrun
		 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);

	printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
		   " interrupt %4.4x.\n",
		   dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL),
		   (int)inl(ioaddr + INTSTAT));
	return;
}
837
838static void check_media(struct net_device *dev)
839{
840 struct epic_private *ep = dev->priv;
841 long ioaddr = dev->base_addr;
842 int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
843 int negotiated = mii_lpa & ep->mii.advertising;
844 int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
845
846 if (ep->mii.force_media)
847 return;
848 if (mii_lpa == 0xffff) /* Bogus read */
849 return;
850 if (ep->mii.full_duplex != duplex) {
851 ep->mii.full_duplex = duplex;
852 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
853 " partner capability of %4.4x.\n", dev->name,
854 ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa);
855 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
856 }
857}
858
859static void epic_timer(unsigned long data)
860{
861 struct net_device *dev = (struct net_device *)data;
862 struct epic_private *ep = dev->priv;
863 long ioaddr = dev->base_addr;
864 int next_tick = 5*HZ;
865
866 if (debug > 3) {
867 printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
868 dev->name, (int)inl(ioaddr + TxSTAT));
869 printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
870 "IntStatus %4.4x RxStatus %4.4x.\n",
871 dev->name, (int)inl(ioaddr + INTMASK),
872 (int)inl(ioaddr + INTSTAT), (int)inl(ioaddr + RxSTAT));
873 }
874
875 check_media(dev);
876
877 ep->timer.expires = jiffies + next_tick;
878 add_timer(&ep->timer);
879}
880
/* Transmit watchdog: a FIFO underflow only needs the transmitter
   restarted; any other cause gets a full chip restart. */
static void epic_tx_timeout(struct net_device *dev)
{
	struct epic_private *ep = dev->priv;
	long ioaddr = dev->base_addr;

	if (debug > 0) {
		printk(KERN_WARNING "%s: Transmit timeout using MII device, "
			   "Tx status %4.4x.\n",
			   dev->name, (int)inw(ioaddr + TxSTAT));
		if (debug > 1) {
			printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
				   dev->name, ep->dirty_tx, ep->cur_tx);
		}
	}
	if (inw(ioaddr + TxSTAT) & 0x10) {		/* Tx FIFO underflow. */
		ep->stats.tx_fifo_errors++;
		outl(RestartTx, ioaddr + COMMAND);
	} else {
		epic_restart(dev);
		outl(TxQueued, dev->base_addr + COMMAND);
	}

	dev->trans_start = jiffies;
	ep->stats.tx_errors++;
	if (!ep->tx_full)
		netif_wake_queue(dev);
}
908
909/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
910static void epic_init_ring(struct net_device *dev)
911{
912 struct epic_private *ep = dev->priv;
913 int i;
914
915 ep->tx_full = 0;
916 ep->dirty_tx = ep->cur_tx = 0;
917 ep->cur_rx = ep->dirty_rx = 0;
918 ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
919
920 /* Initialize all Rx descriptors. */
921 for (i = 0; i < RX_RING_SIZE; i++) {
922 ep->rx_ring[i].rxstatus = 0;
923 ep->rx_ring[i].buflength = cpu_to_le32(ep->rx_buf_sz);
f3b197ac 924 ep->rx_ring[i].next = ep->rx_ring_dma +
1da177e4
LT
925 (i+1)*sizeof(struct epic_rx_desc);
926 ep->rx_skbuff[i] = NULL;
927 }
928 /* Mark the last entry as wrapping the ring. */
929 ep->rx_ring[i-1].next = ep->rx_ring_dma;
930
931 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
932 for (i = 0; i < RX_RING_SIZE; i++) {
933 struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz);
934 ep->rx_skbuff[i] = skb;
935 if (skb == NULL)
936 break;
1da177e4 937 skb_reserve(skb, 2); /* 16 byte align the IP header. */
f3b197ac 938 ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
689be439 939 skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
1da177e4
LT
940 ep->rx_ring[i].rxstatus = cpu_to_le32(DescOwn);
941 }
942 ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
943
944 /* The Tx buffer descriptor is filled in as needed, but we
945 do need to clear the ownership bit. */
946 for (i = 0; i < TX_RING_SIZE; i++) {
947 ep->tx_skbuff[i] = NULL;
948 ep->tx_ring[i].txstatus = 0x0000;
f3b197ac 949 ep->tx_ring[i].next = ep->tx_ring_dma +
1da177e4
LT
950 (i+1)*sizeof(struct epic_tx_desc);
951 }
952 ep->tx_ring[i-1].next = ep->tx_ring_dma;
953 return;
954}
955
/* Queue one skb for transmission.  Descriptor fields are written in a
   specific order: the ownership bit is set last so the chip never sees
   a half-built descriptor. */
static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct epic_private *ep = dev->priv;
	int entry, free_count;
	u32 ctrl_word;
	unsigned long flags;

	/* Pad short frames up to the minimum Ethernet length. */
	if (skb_padto(skb, ETH_ZLEN))
		return 0;

	/* Caution: the write order is important here, set the field with the
	   "ownership" bit last. */

	/* Calculate the next Tx descriptor entry. */
	spin_lock_irqsave(&ep->lock, flags);
	free_count = ep->cur_tx - ep->dirty_tx;
	entry = ep->cur_tx % TX_RING_SIZE;

	ep->tx_skbuff[entry] = skb;
	ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
						    skb->len, PCI_DMA_TODEVICE);
	/* Request a Tx-done interrupt only at the ring midpoint and when
	   nearly full, to keep interrupt load down. */
	if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
		ctrl_word = cpu_to_le32(0x100000); /* No interrupt */
	} else if (free_count == TX_QUEUE_LEN/2) {
		ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
	} else if (free_count < TX_QUEUE_LEN - 1) {
		ctrl_word = cpu_to_le32(0x100000); /* No Tx-done intr. */
	} else {
		/* Leave room for an additional entry. */
		ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
		ep->tx_full = 1;
	}
	ep->tx_ring[entry].buflength = ctrl_word | cpu_to_le32(skb->len);
	/* NOTE(review): the shifted length is OR'd in without a
	   cpu_to_le32() conversion, unlike DescOwn -- looks suspect on
	   big-endian; confirm against the descriptor layout. */
	ep->tx_ring[entry].txstatus =
		((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
			    | cpu_to_le32(DescOwn);

	ep->cur_tx++;
	if (ep->tx_full)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&ep->lock, flags);
	/* Trigger an immediate transmit demand. */
	outl(TxQueued, dev->base_addr + COMMAND);

	dev->trans_start = jiffies;
	if (debug > 4)
		printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
			   "flag %2.2x Tx status %8.8x.\n",
			   dev->name, (int)skb->len, entry, ctrl_word,
			   (int)inl(dev->base_addr + TxSTAT));

	return 0;
}
1010
1011static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
1012 int status)
1013{
1014 struct net_device_stats *stats = &ep->stats;
1015
1016#ifndef final_version
1017 /* There was an major error, log it. */
1018 if (debug > 1)
1019 printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
1020 dev->name, status);
1021#endif
1022 stats->tx_errors++;
1023 if (status & 0x1050)
1024 stats->tx_aborted_errors++;
1025 if (status & 0x0008)
1026 stats->tx_carrier_errors++;
1027 if (status & 0x0040)
1028 stats->tx_window_errors++;
1029 if (status & 0x0010)
1030 stats->tx_fifo_errors++;
1031}
1032
/* Reclaim completed Tx descriptors: free the skbs, fold results into the
   statistics, and wake the transmit queue when enough ring space becomes
   available again.  Called from epic_poll(). */
static void epic_tx(struct net_device *dev, struct epic_private *ep)
{
	unsigned int dirty_tx, cur_tx;

	/*
	 * Note: if this lock becomes a problem we can narrow the locked
	 * region at the cost of occasionally grabbing the lock more times.
	 */
	cur_tx = ep->cur_tx;
	for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {
		struct sk_buff *skb;
		int entry = dirty_tx % TX_RING_SIZE;
		int txstatus = le32_to_cpu(ep->tx_ring[entry].txstatus);

		if (txstatus & DescOwn)
			break;	/* It still hasn't been Txed */

		if (likely(txstatus & 0x0001)) {
			/* Success path: bits 11:8 hold the per-frame
			   collision count. */
			ep->stats.collisions += (txstatus >> 8) & 15;
			ep->stats.tx_packets++;
			ep->stats.tx_bytes += ep->tx_skbuff[entry]->len;
		} else
			epic_tx_error(dev, ep, txstatus);

		/* Free the original skb. */
		skb = ep->tx_skbuff[entry];
		pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
				 skb->len, PCI_DMA_TODEVICE);
		dev_kfree_skb_irq(skb);
		ep->tx_skbuff[entry] = NULL;
	}

#ifndef final_version
	/* Sanity check: software pointers should never drift a full ring
	   apart; resync if they somehow did. */
	if (cur_tx - dirty_tx > TX_RING_SIZE) {
		printk(KERN_WARNING
		       "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
		       dev->name, dirty_tx, cur_tx, ep->tx_full);
		dirty_tx += TX_RING_SIZE;
	}
#endif
	ep->dirty_tx = dirty_tx;
	if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
		/* The ring is no longer full, allow new TX entries. */
		ep->tx_full = 0;
		netif_wake_queue(dev);
	}
}
1080
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread.  Normal Rx/Tx work is deferred to epic_poll()
   via NAPI; only error conditions are handled inline here. */
static irqreturn_t epic_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct epic_private *ep = dev->priv;
	long ioaddr = dev->base_addr;
	unsigned int handled = 0;
	int status;

	status = inl(ioaddr + INTSTAT);
	/* Acknowledge all of the current interrupt sources ASAP. */
	outl(status & EpicNormalEvent, ioaddr + INTSTAT);

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
		       "intstat=%#8.8x.\n", dev->name, status,
		       (int)inl(ioaddr + INTSTAT));
	}

	/* Not our interrupt (shared IRQ line). */
	if ((status & IntrSummary) == 0)
		goto out;

	handled = 1;

	/* Schedule NAPI polling.  If a poll is already in flight, record
	   the event in reschedule_in_poll so epic_poll() loops once more
	   instead of losing it. */
	if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) {
		spin_lock(&ep->napi_lock);
		if (netif_rx_schedule_prep(dev)) {
			epic_napi_irq_off(dev, ep);
			__netif_rx_schedule(dev);
		} else
			ep->reschedule_in_poll++;
		spin_unlock(&ep->napi_lock);
	}
	status &= ~EpicNapiEvent;

	/* Check uncommon events all at once. */
	if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
		/* EpicRemoved: the chip has vanished (presumably a CardBus
		   eject -- confirm the constant's definition); nothing to do. */
		if (status == EpicRemoved)
			goto out;

		/* Always update the error counts to avoid overhead later. */
		ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
		ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
		ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);

		if (status & TxUnderrun) { /* Tx FIFO underflow. */
			ep->stats.tx_fifo_errors++;
			/* Raise the FIFO threshold before restarting Tx so
			   the underrun is less likely to recur. */
			outl(ep->tx_threshold += 128, ioaddr + TxThresh);
			/* Restart the transmit process. */
			outl(RestartTx, ioaddr + COMMAND);
		}
		if (status & PCIBusErr170) {
			printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n",
			       dev->name, status);
			epic_pause(dev);
			epic_restart(dev);
		}
		/* Clear all error sources. */
		outl(status & 0x7f18, ioaddr + INTSTAT);
	}

out:
	if (debug > 3) {
		printk(KERN_DEBUG "%s: exit interrupt, intr_status=%#4.4x.\n",
		       dev->name, status);
	}

	return IRQ_RETVAL(handled);
}
1151
/* Receive up to 'budget' frames from the Rx ring, hand them to the
   network stack, then refill any emptied ring slots.  Returns the number
   of descriptors processed (frames received plus buffers refilled). */
static int epic_rx(struct net_device *dev, int budget)
{
	struct epic_private *ep = dev->priv;
	int entry = ep->cur_rx % RX_RING_SIZE;
	int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
	int work_done = 0;

	if (debug > 4)
		printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry,
		       ep->rx_ring[entry].rxstatus);

	/* Never process more slots than are actually filled. */
	if (rx_work_limit > budget)
		rx_work_limit = budget;

	/* If we own the next entry, it's a new packet. Send it up. */
	while ((ep->rx_ring[entry].rxstatus & cpu_to_le32(DescOwn)) == 0) {
		int status = le32_to_cpu(ep->rx_ring[entry].rxstatus);

		if (debug > 4)
			printk(KERN_DEBUG " epic_rx() status was %8.8x.\n", status);
		if (--rx_work_limit < 0)
			break;
		if (status & 0x2006) {
			/* Error bits set: count it and leave the slot's
			   buffer in place for reuse. */
			if (debug > 2)
				printk(KERN_DEBUG "%s: epic_rx() error status was %8.8x.\n",
				       dev->name, status);
			if (status & 0x2000) {
				printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
				       "multiple buffers, status %4.4x!\n", dev->name, status);
				ep->stats.rx_length_errors++;
			} else if (status & 0x0006)
				/* Rx Frame errors are counted in hardware. */
				ep->stats.rx_errors++;
		} else {
			/* Malloc up new buffer, compatible with net-2e. */
			/* Omit the four octet CRC from the length. */
			short pkt_len = (status >> 16) - 4;
			struct sk_buff *skb;

			if (pkt_len > PKT_BUF_SZ - 4) {
				printk(KERN_ERR "%s: Oversized Ethernet frame, status %x "
				       "%d bytes.\n",
				       dev->name, status, pkt_len);
				pkt_len = 1514;
			}
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				/* Small frame: copy into a fresh skb so the
				   big mapped buffer stays in the ring. */
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(ep->pci_dev,
							    ep->rx_ring[entry].bufaddr,
							    ep->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);
				eth_copy_and_sum(skb, ep->rx_skbuff[entry]->data, pkt_len, 0);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(ep->pci_dev,
							       ep->rx_ring[entry].bufaddr,
							       ep->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			} else {
				/* Large frame: pass the ring buffer itself up
				   the stack; the slot is refilled below. */
				pci_unmap_single(ep->pci_dev,
						 ep->rx_ring[entry].bufaddr,
						 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
				skb_put(skb = ep->rx_skbuff[entry], pkt_len);
				ep->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			dev->last_rx = jiffies;
			ep->stats.rx_packets++;
			ep->stats.rx_bytes += pkt_len;
		}
		work_done++;
		entry = (++ep->cur_rx) % RX_RING_SIZE;
	}

	/* Refill the Rx ring buffers. */
	for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
		entry = ep->dirty_rx % RX_RING_SIZE;
		if (ep->rx_skbuff[entry] == NULL) {
			struct sk_buff *skb;
			skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz);
			if (skb == NULL)
				break;
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
				skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
			work_done++;
		}
		/* Hand the descriptor back to the chip last. */
		ep->rx_ring[entry].rxstatus = cpu_to_le32(DescOwn);
	}
	return work_done;
}
1246
1247static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
1248{
1249 long ioaddr = dev->base_addr;
1250 int status;
1251
1252 status = inl(ioaddr + INTSTAT);
1253
1254 if (status == EpicRemoved)
1255 return;
1256 if (status & RxOverflow) /* Missed a Rx frame. */
1257 ep->stats.rx_errors++;
1258 if (status & (RxOverflow | RxFull))
1259 outw(RxQueued, ioaddr + COMMAND);
1260}
1261
/* NAPI poll callback: reap finished Tx descriptors, receive packets,
   handle Rx error conditions, and re-enable device interrupts once all
   pending work is done.  Returns nonzero while more work remains. */
static int epic_poll(struct net_device *dev, int *budget)
{
	struct epic_private *ep = dev->priv;
	int work_done = 0, orig_budget;
	long ioaddr = dev->base_addr;

	orig_budget = (*budget > dev->quota) ? dev->quota : *budget;

rx_action:

	epic_tx(dev, ep);

	work_done += epic_rx(dev, *budget);

	epic_rx_err(dev, ep);

	*budget -= work_done;
	dev->quota -= work_done;

	if (netif_running(dev) && (work_done < orig_budget)) {
		unsigned long flags;
		int more;

		/* A bit baroque but it avoids a (space hungry) spin_unlock */

		spin_lock_irqsave(&ep->napi_lock, flags);

		/* reschedule_in_poll is bumped by epic_interrupt() when an
		   event arrives while polling is already scheduled; in that
		   case loop again instead of completing. */
		more = ep->reschedule_in_poll;
		if (!more) {
			__netif_rx_complete(dev);
			/* Ack the NAPI events, then unmask interrupts. */
			outl(EpicNapiEvent, ioaddr + INTSTAT);
			epic_napi_irq_on(dev, ep);
		} else
			ep->reschedule_in_poll--;

		spin_unlock_irqrestore(&ep->napi_lock, flags);

		if (more)
			goto rx_action;
	}

	return (work_done >= orig_budget);
}
1305
/* Stop the interface: halt the chip, release its IRQ, free every buffer
   still attached to the Rx/Tx rings and drop into low-power mode.
   Always returns 0. */
static int epic_close(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct epic_private *ep = dev->priv;
	struct sk_buff *skb;
	int i;

	netif_stop_queue(dev);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
		       dev->name, (int)inl(ioaddr + INTSTAT));

	/* Stop the periodic driver timer before tearing anything down. */
	del_timer_sync(&ep->timer);

	epic_disable_int(dev, ep);

	free_irq(dev->irq, dev);

	epic_pause(dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = ep->rx_skbuff[i];
		ep->rx_skbuff[i] = NULL;
		ep->rx_ring[i].rxstatus = 0;		/* Not owned by Epic chip. */
		ep->rx_ring[i].buflength = 0;
		if (skb) {
			pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr,
					 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
		}
		ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
	}
	/* Any skb still in the Tx ring was never completed by the chip;
	   unmap and free it now. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = ep->tx_skbuff[i];
		ep->tx_skbuff[i] = NULL;
		if (!skb)
			continue;
		pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr,
				 skb->len, PCI_DMA_TODEVICE);
		dev_kfree_skb(skb);
	}

	/* Green! Leave the chip in low-power mode. */
	outl(0x0008, ioaddr + GENCTL);

	return 0;
}
1355
1356static struct net_device_stats *epic_get_stats(struct net_device *dev)
1357{
1358 struct epic_private *ep = dev->priv;
1359 long ioaddr = dev->base_addr;
1360
1361 if (netif_running(dev)) {
1362 /* Update the error counts. */
1363 ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
1364 ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
1365 ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
1366 }
1367
1368 return &ep->stats;
1369}
1370
1371/* Set or clear the multicast filter for this adaptor.
1372 Note that we only use exclusion around actually queueing the
1373 new frame, not around filling ep->setup_frame. This is non-deterministic
1374 when re-entered but still correct. */
1375
1376static void set_rx_mode(struct net_device *dev)
1377{
1378 long ioaddr = dev->base_addr;
1379 struct epic_private *ep = dev->priv;
1380 unsigned char mc_filter[8]; /* Multicast hash filter */
1381 int i;
1382
1383 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1384 outl(0x002C, ioaddr + RxCtrl);
1385 /* Unconditionally log net taps. */
1da177e4
LT
1386 memset(mc_filter, 0xff, sizeof(mc_filter));
1387 } else if ((dev->mc_count > 0) || (dev->flags & IFF_ALLMULTI)) {
1388 /* There is apparently a chip bug, so the multicast filter
1389 is never enabled. */
1390 /* Too many to filter perfectly -- accept all multicasts. */
1391 memset(mc_filter, 0xff, sizeof(mc_filter));
1392 outl(0x000C, ioaddr + RxCtrl);
1393 } else if (dev->mc_count == 0) {
1394 outl(0x0004, ioaddr + RxCtrl);
1395 return;
1396 } else { /* Never executed, for now. */
1397 struct dev_mc_list *mclist;
1398
1399 memset(mc_filter, 0, sizeof(mc_filter));
1400 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1401 i++, mclist = mclist->next) {
1402 unsigned int bit_nr =
1403 ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
1404 mc_filter[bit_nr >> 3] |= (1 << bit_nr);
1405 }
1406 }
1407 /* ToDo: perhaps we need to stop the Tx and Rx process here? */
1408 if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
1409 for (i = 0; i < 4; i++)
1410 outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4);
1411 memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
1412 }
1413 return;
1414}
1415
1416static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1417{
1418 struct epic_private *np = dev->priv;
1419
1420 strcpy (info->driver, DRV_NAME);
1421 strcpy (info->version, DRV_VERSION);
1422 strcpy (info->bus_info, pci_name(np->pci_dev));
1423}
1424
1425static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1426{
1427 struct epic_private *np = dev->priv;
1428 int rc;
1429
1430 spin_lock_irq(&np->lock);
1431 rc = mii_ethtool_gset(&np->mii, cmd);
1432 spin_unlock_irq(&np->lock);
1433
1434 return rc;
1435}
1436
1437static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1438{
1439 struct epic_private *np = dev->priv;
1440 int rc;
1441
1442 spin_lock_irq(&np->lock);
1443 rc = mii_ethtool_sset(&np->mii, cmd);
1444 spin_unlock_irq(&np->lock);
1445
1446 return rc;
1447}
1448
1449static int netdev_nway_reset(struct net_device *dev)
1450{
1451 struct epic_private *np = dev->priv;
1452 return mii_nway_restart(&np->mii);
1453}
1454
1455static u32 netdev_get_link(struct net_device *dev)
1456{
1457 struct epic_private *np = dev->priv;
1458 return mii_link_ok(&np->mii);
1459}
1460
1461static u32 netdev_get_msglevel(struct net_device *dev)
1462{
1463 return debug;
1464}
1465
1466static void netdev_set_msglevel(struct net_device *dev, u32 value)
1467{
1468 debug = value;
1469}
1470
1471static int ethtool_begin(struct net_device *dev)
1472{
1473 unsigned long ioaddr = dev->base_addr;
1474 /* power-up, if interface is down */
1475 if (! netif_running(dev)) {
1476 outl(0x0200, ioaddr + GENCTL);
1477 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
1478 }
1479 return 0;
1480}
1481
1482static void ethtool_complete(struct net_device *dev)
1483{
1484 unsigned long ioaddr = dev->base_addr;
1485 /* power-down, if interface is down */
1486 if (! netif_running(dev)) {
1487 outl(0x0008, ioaddr + GENCTL);
1488 outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
1489 }
1490}
1491
7282d491 1492static const struct ethtool_ops netdev_ethtool_ops = {
1da177e4
LT
1493 .get_drvinfo = netdev_get_drvinfo,
1494 .get_settings = netdev_get_settings,
1495 .set_settings = netdev_set_settings,
1496 .nway_reset = netdev_nway_reset,
1497 .get_link = netdev_get_link,
1498 .get_msglevel = netdev_get_msglevel,
1499 .set_msglevel = netdev_set_msglevel,
1500 .get_sg = ethtool_op_get_sg,
1501 .get_tx_csum = ethtool_op_get_tx_csum,
1502 .begin = ethtool_begin,
1503 .complete = ethtool_complete
1504};
1505
/* MII ioctl handler (SIOC[GS]MIIxxx).  The chip is powered up around the
   MII access when the interface is down, mirroring the
   ethtool_begin()/ethtool_complete() pair above. */
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct epic_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	struct mii_ioctl_data *data = if_mii(rq);
	int rc;

	/* power-up, if interface is down */
	if (! netif_running(dev)) {
		outl(0x0200, ioaddr + GENCTL);
		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
	}

	/* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii, data, cmd, NULL);
	spin_unlock_irq(&np->lock);

	/* power-down, if interface is down */
	if (! netif_running(dev)) {
		outl(0x0008, ioaddr + GENCTL);
		outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
	}
	return rc;
}
1531
1532
1533static void __devexit epic_remove_one (struct pci_dev *pdev)
1534{
1535 struct net_device *dev = pci_get_drvdata(pdev);
1536 struct epic_private *ep = dev->priv;
f3b197ac 1537
1da177e4
LT
1538 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
1539 pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
1540 unregister_netdev(dev);
1541#ifndef USE_IO_OPS
1542 iounmap((void*) dev->base_addr);
1543#endif
1544 pci_release_regions(pdev);
1545 free_netdev(dev);
1546 pci_disable_device(pdev);
1547 pci_set_drvdata(pdev, NULL);
1548 /* pci_power_off(pdev, -1); */
1549}
1550
1551
1552#ifdef CONFIG_PM
1553
1554static int epic_suspend (struct pci_dev *pdev, pm_message_t state)
1555{
1556 struct net_device *dev = pci_get_drvdata(pdev);
1557 long ioaddr = dev->base_addr;
1558
1559 if (!netif_running(dev))
1560 return 0;
1561 epic_pause(dev);
1562 /* Put the chip into low-power mode. */
1563 outl(0x0008, ioaddr + GENCTL);
1564 /* pci_power_off(pdev, -1); */
1565 return 0;
1566}
1567
1568
/* Power-management resume: reprogram the chip only when the interface
   was running at suspend time. */
static int epic_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (netif_running(dev))
		epic_restart(dev);
	/* pci_power_on(pdev); */
	return 0;
}
1579
1580#endif /* CONFIG_PM */
1581
1582
/* PCI glue: binds epic_init_one()/epic_remove_one() to the device IDs
   in epic_pci_tbl, with optional power-management hooks. */
static struct pci_driver epic_driver = {
	.name = DRV_NAME,
	.id_table = epic_pci_tbl,
	.probe = epic_init_one,
	.remove = __devexit_p(epic_remove_one),
#ifdef CONFIG_PM
	.suspend = epic_suspend,
	.resume = epic_resume,
#endif /* CONFIG_PM */
};
1593
1594
/* Module/built-in entry point: print the version banner (module build
   only) and register the PCI driver with the core. */
static int __init epic_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
		version, version2, version3);
#endif

	return pci_register_driver(&epic_driver);
}
1605
1606
1607static void __exit epic_cleanup (void)
1608{
1609 pci_unregister_driver (&epic_driver);
1610}
1611
1612
/* Wire the init/exit routines into the module loader. */
module_init(epic_init);
module_exit(epic_cleanup);