[PATCH] sis190: merge some register related information from SiS driver.
[GitHub/LineageOS/android_kernel_motorola_exynos9610.git] / drivers / net / sis190.c
CommitLineData
890e8d0a
FR
1/*
2 sis190.c: Silicon Integrated Systems SiS190 ethernet driver
3
4 Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
5 Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
6 Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
7
8 Based on r8169.c, tg3.c, 8139cp.c, skge.c and probably even epic100.c.
9
10 This software may be used and distributed according to the terms of
11 the GNU General Public License (GPL), incorporated herein by reference.
12 Drivers based on or derived from this code fall under the GPL and must
13 retain the authorship, copyright and license notice. This file is not
14 a complete program and may only be used when the entire operating
15 system is licensed under the GPL.
16
17 See the file COPYING in this distribution for more information.
18
19 */
20
21#include <linux/module.h>
22#include <linux/moduleparam.h>
23#include <linux/netdevice.h>
43afb949 24#include <linux/rtnetlink.h>
890e8d0a
FR
25#include <linux/etherdevice.h>
26#include <linux/ethtool.h>
27#include <linux/pci.h>
28#include <linux/mii.h>
29#include <linux/delay.h>
30#include <linux/crc32.h>
31#include <linux/dma-mapping.h>
32#include <asm/irq.h>
33
/* Logging helpers gated on the per-device netif_msg_* message-level bits. */
#define net_drv(p, arg...)	if (netif_msg_drv(p)) \
					printk(arg)
#define net_probe(p, arg...)	if (netif_msg_probe(p)) \
					printk(arg)
#define net_link(p, arg...)	if (netif_msg_link(p)) \
					printk(arg)
#define net_intr(p, arg...)	if (netif_msg_intr(p)) \
					printk(arg)
#define net_tx_err(p, arg...)	if (netif_msg_tx_err(p)) \
					printk(arg)

#ifdef CONFIG_SIS190_NAPI
#define NAPI_SUFFIX	"-NAPI"
#else
#define NAPI_SUFFIX	""
#endif

#define DRV_VERSION		"1.2" NAPI_SUFFIX
#define DRV_NAME		"sis190"
#define SIS190_DRIVER_NAME	DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
#define PFX			DRV_NAME ": "

/* NAPI builds feed the softirq poll path and honor the quota; non-NAPI
 * builds deliver straight through netif_rx and ignore the quota. */
#ifdef CONFIG_SIS190_NAPI
#define sis190_rx_skb			netif_receive_skb
#define sis190_rx_quota(count, quota)	min(count, quota)
#else
#define sis190_rx_skb			netif_rx
#define sis190_rx_quota(count, quota)	count
#endif

#define MAC_ADDR_LEN		6

/* Ring geometry and per-buffer Rx allocation size. */
#define NUM_TX_DESC		64
#define NUM_RX_DESC		64
#define TX_RING_BYTES		(NUM_TX_DESC * sizeof(struct TxDesc))
#define RX_RING_BYTES		(NUM_RX_DESC * sizeof(struct RxDesc))
#define RX_BUF_SIZE		1536

#define SIS190_REGS_SIZE	0x80
#define SIS190_TX_TIMEOUT	(6*HZ)
#define SIS190_PHY_TIMEOUT	(10*HZ)
#define SIS190_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
				 NETIF_MSG_IFDOWN)

/* Enhanced PHY access register bit definitions */
#define EhnMIIread		0x0000
#define EhnMIIwrite		0x0020
#define EhnMIIdataShift		16
#define EhnMIIpmdShift		6	/* 7016 only */
#define EhnMIIregShift		11
#define EhnMIIreq		0x0010
#define EhnMIInotDone		0x0010

/* Write/read MMIO register; each expects a local 'ioaddr' in scope. */
#define SIS_W8(reg, val)	writeb ((val), ioaddr + (reg))
#define SIS_W16(reg, val)	writew ((val), ioaddr + (reg))
#define SIS_W32(reg, val)	writel ((val), ioaddr + (reg))
#define SIS_R8(reg)		readb (ioaddr + (reg))
#define SIS_R16(reg)		readw (ioaddr + (reg))
#define SIS_R32(reg)		readl (ioaddr + (reg))

/* Dummy read to force completion of posted PCI writes. */
#define SIS_PCI_COMMIT()	SIS_R32(IntrControl)
/* MMIO register offsets (byte offsets into the SIS190_REGS_SIZE window). */
enum sis190_registers {
	TxControl		= 0x00,
	TxDescStartAddr		= 0x04,
	rsv0			= 0x08,	// reserved
	TxSts			= 0x0c,	// unused (Control/Status)
	RxControl		= 0x10,
	RxDescStartAddr		= 0x14,
	rsv1			= 0x18,	// reserved
	RxSts			= 0x1c,	// unused
	IntrStatus		= 0x20,
	IntrMask		= 0x24,
	IntrControl		= 0x28,
	IntrTimer		= 0x2c,	// unused (Interrupt Timer)
	PMControl		= 0x30,	// unused (Power Mgmt Control/Status)
	rsv2			= 0x34,	// reserved
	ROMControl		= 0x38,
	ROMInterface		= 0x3c,
	StationControl		= 0x40,
	GMIIControl		= 0x44,
	GIoCR			= 0x48,	// unused (GMAC IO Compensation)
	GIoCtrl			= 0x4c,	// unused (GMAC IO Control)
	TxMacControl		= 0x50,
	TxLimit			= 0x54,	// unused (Tx MAC Timer/TryLimit)
	RGDelay			= 0x58,	// unused (RGMII Tx Internal Delay)
	rsv3			= 0x5c,	// reserved
	RxMacControl		= 0x60,
	RxMacAddr		= 0x62,
	RxHashTable		= 0x68,
	// Undocumented	= 0x6c,
	RxWolCtrl		= 0x70,
	RxWolData		= 0x74,	// unused (Rx WOL Data Access)
	RxMPSControl		= 0x78,	// unused (Rx MPS Control)
	rsv4			= 0x7c,	// reserved
};
132
/* Bit definitions for the registers above and for Rx status words. */
enum sis190_register_content {
	/* IntrStatus */
	SoftInt			= 0x40000000,	// unused
	Timeup			= 0x20000000,	// unused
	PauseFrame		= 0x00080000,	// unused
	MagicPacket		= 0x00040000,	// unused
	WakeupFrame		= 0x00020000,	// unused
	LinkChange		= 0x00010000,
	RxQEmpty		= 0x00000080,
	RxQInt			= 0x00000040,
	TxQ1Empty		= 0x00000020,	// unused
	TxQ1Int			= 0x00000010,
	TxQ0Empty		= 0x00000008,	// unused
	TxQ0Int			= 0x00000004,
	RxHalt			= 0x00000002,
	TxHalt			= 0x00000001,

	/* RxStatusDesc */
	RxRES			= 0x00200000,	// unused
	RxCRC			= 0x00080000,
	RxRUNT			= 0x00100000,	// unused
	RxRWT			= 0x00400000,	// unused

	/* {Rx/Tx}CmdBits */
	CmdReset		= 0x10,
	CmdRxEnb		= 0x08,		// unused
	CmdTxEnb		= 0x01,
	RxBufEmpty		= 0x01,		// unused

	/* Cfg9346Bits */
	Cfg9346_Lock		= 0x00,		// unused
	Cfg9346_Unlock		= 0xc0,		// unused

	/* RxMacControl */
	AcceptErr		= 0x20,		// unused
	AcceptRunt		= 0x10,		// unused
	AcceptBroadcast		= 0x0800,
	AcceptMulticast		= 0x0400,
	AcceptMyPhys		= 0x0200,
	AcceptAllPhys		= 0x0100,

	/* RxConfigBits */
	RxCfgFIFOShift		= 13,
	RxCfgDMAShift		= 8,		// 0x1a in RxControl ?

	/* TxConfigBits */
	TxInterFrameGapShift	= 24,
	TxDMAShift		= 8,	/* DMA burst value (0-7) is shift this many bits */

	/* StationControl: speed/duplex values written by sis190_phy_task. */
	_1000bpsF		= 0x1c00,
	_1000bpsH		= 0x0c00,
	_100bpsF		= 0x1800,
	_100bpsH		= 0x0800,
	_10bpsF			= 0x1400,
	_10bpsH			= 0x0400,

	LinkStatus		= 0x02,		// unused
	FullDup			= 0x01,		// unused

	/* TBICSRBit */
	TBILinkOK		= 0x02000000,	// unused
};
196
/*
 * Hardware Tx/Rx descriptors. Fields are stored little-endian for the
 * chip: the driver wraps accesses in cpu_to_le32()/le32_to_cpu().
 */
struct TxDesc {
	u32 PSize;	/* packet size */
	u32 status;
	u32 addr;	/* DMA bus address of the buffer */
	u32 size;	/* buffer size; top bit doubles as RingEnd */
};

struct RxDesc {
	u32 PSize;	/* packet size / Rx status word */
	u32 status;
	u32 addr;	/* DMA bus address of the buffer */
	u32 size;	/* buffer size; top bit doubles as RingEnd */
};

enum _DescStatusBit {
	/* _Desc.status */
	OWNbit		= 0x80000000,	/* set: descriptor owned by the NIC */
	INTbit		= 0x40000000,	/* interrupt on completion */
	DEFbit		= 0x00200000,
	CRCbit		= 0x00020000,
	PADbit		= 0x00010000,
	/* _Desc.size */
	RingEnd		= (1 << 31),	/* last descriptor of the ring */
	/* _Desc.PSize */
	RxSizeMask	= 0x0000ffff
};
223
/* Per-adapter state, stored in the net_device private area. */
struct sis190_private {
	void __iomem *mmio_addr;	/* ioremapped BAR 0 registers */
	struct pci_dev *pci_dev;
	struct net_device_stats stats;
	spinlock_t lock;		/* excludes the irq path (rx mode, tx clear) */
	u32 rx_buf_sz;			/* Rx buffer allocation size, from MTU */
	u32 cur_rx;			/* free-running indices; % NUM_*_DESC = slot */
	u32 cur_tx;
	u32 dirty_rx;
	u32 dirty_tx;
	dma_addr_t rx_dma;		/* bus address of RxDescRing */
	dma_addr_t tx_dma;		/* bus address of TxDescRing */
	struct RxDesc *RxDescRing;
	struct TxDesc *TxDescRing;
	struct sk_buff *Rx_skbuff[NUM_RX_DESC];
	struct sk_buff *Tx_skbuff[NUM_TX_DESC];
	struct work_struct phy_task;	/* link (re)negotiation, see sis190_phy_task */
	struct timer_list timer;	/* periodic PHY poll, see sis190_phy_timer */
	u32 msg_enable;			/* netif_msg_* verbosity bits */
	struct mii_if_info mii_if;	/* generic MII library hookup */
};
245
246const static struct {
247 const char *name;
248 u8 version; /* depend on docs */
249 u32 RxConfigMask; /* clear the bits supported by this chip */
250} sis_chip_info[] = {
251 { DRV_NAME, 0x00, 0xff7e1880, },
252};
253
/* PCI IDs claimed by this driver: SiS vendor, device 0x0190. */
static struct pci_device_id sis190_pci_tbl[] __devinitdata = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
	{ 0, },
};

MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
260
/* Frames shorter than this are copied into a fresh skb on receive. */
static int rx_copybreak = 200;

static struct {
	u32 msg_enable;		/* -1: take SIS190_MSG_DEFAULT at probe time */
} debug = { -1 };

MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver");
module_param(rx_copybreak, int, 0);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");

/* Interrupt sources the driver actually handles (see sis190_interrupt). */
static const u32 sis190_intr_mask =
	RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt;

/*
 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
 * The chips use a 64 element hash table based on the Ethernet CRC.
 */
static int multicast_filter_limit = 32;
284
285static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
286{
287 unsigned int i;
288
289 SIS_W32(GMIIControl, ctl);
290
291 msleep(1);
292
293 for (i = 0; i < 100; i++) {
294 if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
295 break;
296 msleep(1);
297 }
298
299 if (i > 999)
300 printk(KERN_ERR PFX "PHY command failed !\n");
301}
302
303static void mdio_write(void __iomem *ioaddr, int reg, int val)
304{
305 u32 pmd = 1;
306
307 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
308 (((u32) reg) << EhnMIIregShift) | (pmd << EhnMIIpmdShift) |
309 (((u32) val) << EhnMIIdataShift));
310}
311
312static int mdio_read(void __iomem *ioaddr, int reg)
313{
314 u32 pmd = 1;
315
316 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
317 (((u32) reg) << EhnMIIregShift) | (pmd << EhnMIIpmdShift));
318
319 return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
320}
321
43afb949
FR
322static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
323{
324 struct sis190_private *tp = netdev_priv(dev);
325
326 mdio_write(tp->mmio_addr, reg, val);
327}
328
329static int __mdio_read(struct net_device *dev, int phy_id, int reg)
330{
331 struct sis190_private *tp = netdev_priv(dev);
332
333 return mdio_read(tp->mmio_addr, reg);
334}
335
890e8d0a
FR
/*
 * Read one 16 bit word from the EEPROM through the ROM interface.
 * Returns 0 when no EEPROM is signalled (ROMControl bit 1 clear).
 * NOTE(review): a polling timeout is not reported; whatever is in
 * ROMInterface after the loop is returned -- confirm against chip docs.
 */
static int sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
{
	unsigned int i;
	u16 data;
	u32 val;

	if (!(SIS_R32(ROMControl) & 0x0002))
		return 0;

	/* Command word: operation 0x2 (read), target register, go bit 0x0080. */
	val = (0x0080 | (0x2 << 8) | (reg << 10));

	SIS_W32(ROMInterface, val);

	/* Wait (up to ~200 ms) for the go/busy bit to clear. */
	for (i = 0; i < 200; i++) {
		if (!(SIS_R32(ROMInterface) & 0x0080))
			break;
		msleep(1);
	}

	/* Result is delivered in the upper 16 bits of ROMInterface. */
	data = (u16) ((SIS_R32(ROMInterface) & 0xffff0000) >> 16);

	return data;
}
359
/* Mask all interrupt sources, ack anything pending, flush the writes. */
static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
{
	SIS_W32(IntrMask, 0x00);
	SIS_W32(IntrStatus, 0xffffffff);
	SIS_PCI_COMMIT();
}
366
/* Quiesce the chip: stop both DMA engines, then mask/ack interrupts. */
static void sis190_asic_down(void __iomem *ioaddr)
{
	/* Stop the chip's Tx and Rx DMA processes. */

	SIS_W32(TxControl, 0x1a00);
	SIS_W32(RxControl, 0x1a00);

	sis190_irq_mask_and_ack(ioaddr);
}
376
/* Set the RingEnd bit: marks 'desc' as the last descriptor of the ring. */
static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->size |= cpu_to_le32(RingEnd);
}
381
/*
 * Return an already-mapped descriptor to the NIC. The ownership transfer
 * (OWNbit in status) must be the last write the device can observe,
 * hence the wmb() between the size/PSize updates and the status write.
 */
static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->size) & RingEnd;	/* preserve ring wrap bit */

	desc->PSize = 0x0;
	desc->size = cpu_to_le32(rx_buf_sz | eor);
	wmb();
	desc->status = cpu_to_le32(OWNbit | INTbit);
}
391
/* Attach a freshly DMA-mapped buffer to 'desc' and hand it to the NIC. */
static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				      u32 rx_buf_sz)
{
	desc->addr = cpu_to_le32(mapping);
	sis190_give_to_asic(desc, rx_buf_sz);
}
398
/*
 * Neutralize a descriptor so the NIC cannot DMA into it: poison the
 * address, zero the size (keeping only the RingEnd bit) and, after a
 * write barrier, drop hardware ownership by clearing status.
 */
static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->PSize = 0x0;
	desc->addr = 0xdeadbeef;	/* poison; NOTE(review): stored unswapped */
	desc->size &= cpu_to_le32(RingEnd);
	wmb();
	desc->status = 0x0;
}
407
/*
 * Allocate one Rx skb, DMA-map it and attach it to 'desc'.
 * Returns 0 on success; -ENOMEM on allocation failure, in which case
 * the descriptor is made unusable by the NIC.
 */
static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
			       struct RxDesc *desc, u32 rx_buf_sz)
{
	struct sk_buff *skb;
	dma_addr_t mapping;
	int ret = 0;

	skb = dev_alloc_skb(rx_buf_sz);
	if (!skb)
		goto err_out;

	*sk_buff = skb;

	mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
				 PCI_DMA_FROMDEVICE);

	sis190_map_to_asic(desc, mapping, rx_buf_sz);
out:
	return ret;

err_out:
	ret = -ENOMEM;
	sis190_make_unusable_by_asic(desc);
	goto out;
}
433
434static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
435 u32 start, u32 end)
436{
437 u32 cur;
438
439 for (cur = start; cur < end; cur++) {
440 int ret, i = cur % NUM_RX_DESC;
441
442 if (tp->Rx_skbuff[i])
443 continue;
444
445 ret = sis190_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
446 tp->RxDescRing + i, tp->rx_buf_sz);
447 if (ret < 0)
448 break;
449 }
450 return cur - start;
451}
452
/*
 * For small frames (< rx_copybreak) copy the payload into a fresh skb
 * so the original buffer can be recycled in place. Returns 0 when the
 * copy was made (*sk_buff now points at the new skb and the descriptor
 * was re-armed), -1 when the caller must detach the original buffer.
 */
static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
				     struct RxDesc *desc, int rx_buf_sz)
{
	int ret = -1;

	if (pkt_size < rx_copybreak) {
		struct sk_buff *skb;

		skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
		if (skb) {
			skb_reserve(skb, NET_IP_ALIGN);	/* align IP header */
			eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
			*sk_buff = skb;
			sis190_give_to_asic(desc, rx_buf_sz);
			ret = 0;
		}
	}
	return ret;
}
472
/*
 * Receive handler: drain completed Rx descriptors, push good frames up
 * the stack, then refill the ring. Returns the number of descriptors
 * processed. Runs in irq context (or the poll path in NAPI builds).
 */
static int sis190_rx_interrupt(struct net_device *dev,
			       struct sis190_private *tp, void __iomem *ioaddr)
{
	struct net_device_stats *stats = &tp->stats;
	u32 rx_left, cur_rx = tp->cur_rx;
	u32 delta, count;

	/* Number of ring entries the hardware may have filled. */
	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
	rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);

	for (; rx_left > 0; rx_left--, cur_rx++) {
		unsigned int entry = cur_rx % NUM_RX_DESC;
		struct RxDesc *desc = tp->RxDescRing + entry;
		u32 status;

		/* NOTE(review): OWNbit tested without le32_to_cpu --
		 * confirm intent on big-endian hosts. */
		if (desc->status & OWNbit)
			break;

		status = le32_to_cpu(desc->PSize);

		// net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
		//	 status);

		if (status & RxCRC) {
			net_intr(tp, KERN_INFO "%s: bad crc. status = %08x.\n",
				 dev->name, status);
			stats->rx_errors++;
			stats->rx_crc_errors++;
			/* Recycle the buffer in place. */
			sis190_give_to_asic(desc, tp->rx_buf_sz);
		} else if (!(status & PADbit)) {
			net_intr(tp, KERN_INFO "%s: bad pad. status = %08x.\n",
				 dev->name, status);
			stats->rx_errors++;
			stats->rx_length_errors++;
			sis190_give_to_asic(desc, tp->rx_buf_sz);
		} else {
			struct sk_buff *skb = tp->Rx_skbuff[entry];
			int pkt_size = (status & RxSizeMask) - 4;	/* strip FCS */
			/* Default: buffer stays in the ring (small frame was
			 * copied); switched to unmap when the skb leaves. */
			void (*pci_action)(struct pci_dev *, dma_addr_t,
				size_t, int) = pci_dma_sync_single_for_device;

			if (unlikely(pkt_size > tp->rx_buf_sz)) {
				net_intr(tp, KERN_INFO
					 "%s: (frag) status = %08x.\n",
					 dev->name, status);
				stats->rx_dropped++;
				stats->rx_length_errors++;
				sis190_give_to_asic(desc, tp->rx_buf_sz);
				continue;
			}

			pci_dma_sync_single_for_cpu(tp->pci_dev,
				le32_to_cpu(desc->addr), tp->rx_buf_sz,
				PCI_DMA_FROMDEVICE);

			if (sis190_try_rx_copy(&skb, pkt_size, desc,
					       tp->rx_buf_sz)) {
				/* Big frame: detach the ring buffer. */
				pci_action = pci_unmap_single;
				tp->Rx_skbuff[entry] = NULL;
				sis190_make_unusable_by_asic(desc);
			}

			pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
				   tp->rx_buf_sz, PCI_DMA_FROMDEVICE);

			skb->dev = dev;
			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);

			sis190_rx_skb(skb);

			dev->last_rx = jiffies;
			stats->rx_bytes += pkt_size;
			stats->rx_packets++;
		}
	}
	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
	if (!delta && count && netif_msg_intr(tp))
		printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
	tp->dirty_rx += delta;

	if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
		printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);

	return count;
}
562
/*
 * Undo the DMA mapping of a transmitted skb and wipe its descriptor.
 * The mapped length mirrors the ETH_ZLEN padding applied at xmit time.
 */
static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
				struct TxDesc *desc)
{
	unsigned int len;

	len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;

	pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);

	memset(desc, 0x00, sizeof(*desc));
}
574
/*
 * Reclaim completed Tx descriptors: free their skbs, update counters
 * and wake the queue if it had been stopped on a full ring.
 */
static void sis190_tx_interrupt(struct net_device *dev,
				struct sis190_private *tp, void __iomem *ioaddr)
{
	u32 pending, dirty_tx = tp->dirty_tx;
	/*
	 * It would not be needed if queueing was allowed to be enabled
	 * again too early (hint: think preempt and unclocked smp systems).
	 */
	unsigned int queue_stopped;

	smp_rmb();	/* pairs with smp_wmb() in sis190_start_xmit */
	pending = tp->cur_tx - dirty_tx;
	queue_stopped = (pending == NUM_TX_DESC);

	for (; pending; pending--, dirty_tx++) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct TxDesc *txd = tp->TxDescRing + entry;
		struct sk_buff *skb;

		/* Still owned by the NIC -- stop here. */
		if (le32_to_cpu(txd->status) & OWNbit)
			break;

		skb = tp->Tx_skbuff[entry];

		tp->stats.tx_packets++;
		tp->stats.tx_bytes += skb->len;

		sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
		tp->Tx_skbuff[entry] = NULL;
		dev_kfree_skb_irq(skb);
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		smp_wmb();
		if (queue_stopped)
			netif_wake_queue(dev);
	}
}
614
/*
 * The interrupt handler does all of the Rx thread work and cleans up after
 * the Tx thread.
 */
static irqreturn_t sis190_interrupt(int irq, void *__dev, struct pt_regs *regs)
{
	struct net_device *dev = __dev;
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int handled = 0;
	u32 status;

	status = SIS_R32(IntrStatus);

	/* 0xffffffff: chip gone/shared-irq glitch; 0: not ours. */
	if ((status == 0xffffffff) || !status)
		goto out;

	handled = 1;

	/* Device going down: quiesce and bail out. */
	if (unlikely(!netif_running(dev))) {
		sis190_asic_down(ioaddr);
		goto out;
	}

	SIS_W32(IntrStatus, status);	/* ack what we are about to handle */

	// net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);

	if (status & LinkChange) {
		net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
		/* Renegotiation is slow; defer it to process context. */
		schedule_work(&tp->phy_task);
	}

	if (status & RxQInt)
		sis190_rx_interrupt(dev, tp, ioaddr);

	if (status & TxQ0Int)
		sis190_tx_interrupt(dev, tp, ioaddr);
out:
	return IRQ_RETVAL(handled);
}
656
4405d3b5
FR
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt': drive the handler with the device irq disabled
 * (netconsole and friends). */
static void sis190_netpoll(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	disable_irq(pdev->irq);
	sis190_interrupt(pdev->irq, dev, NULL);
	enable_irq(pdev->irq);
}
#endif
668
890e8d0a
FR
/* Unmap, free and detach one Rx buffer; neutralize its descriptor. */
static void sis190_free_rx_skb(struct sis190_private *tp,
			       struct sk_buff **sk_buff, struct RxDesc *desc)
{
	struct pci_dev *pdev = tp->pci_dev;

	pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
			 PCI_DMA_FROMDEVICE);
	dev_kfree_skb(*sk_buff);
	*sk_buff = NULL;
	sis190_make_unusable_by_asic(desc);
}
680
681static void sis190_rx_clear(struct sis190_private *tp)
682{
683 unsigned int i;
684
685 for (i = 0; i < NUM_RX_DESC; i++) {
686 if (!tp->Rx_skbuff[i])
687 continue;
688 sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
689 }
690}
691
/* Reset all four free-running ring indices to the ring base. */
static void sis190_init_ring_indexes(struct sis190_private *tp)
{
	tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
}
696
/*
 * Initialise both rings: zero the skb tables, fully populate the Rx
 * ring and mark its last descriptor. Returns 0 or -ENOMEM (everything
 * allocated so far is released on failure).
 */
static int sis190_init_ring(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	sis190_init_ring_indexes(tp);

	memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
	memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));

	if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
		goto err_rx_clear;

	sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);

	return 0;

err_rx_clear:
	sis190_rx_clear(tp);
	return -ENOMEM;
}
717
/*
 * Program the Rx filter from dev->flags and the multicast list:
 * promiscuous, all-multi (or list overflow), or a 64-bit CRC hash of
 * the individual multicast addresses.
 */
static void sis190_set_rx_mode(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;
	u32 mc_filter[2];	/* Multicast hash filter */
	u16 rx_mode;

	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		net_drv(tp, KERN_NOTICE "%s: Promiscuous mode enabled.\n",
			dev->name);
		rx_mode =
			AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
			AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((dev->mc_count > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct dev_mc_list *mclist;
		unsigned int i;

		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		/* Top 6 bits of the Ethernet CRC index the 64-entry hash. */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			int bit_nr =
				ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	/* Exclude the irq path while touching the filter registers. */
	spin_lock_irqsave(&tp->lock, flags);

	SIS_W16(RxMacControl, rx_mode | 0x2);
	SIS_W32(RxHashTable, mc_filter[0]);
	SIS_W32(RxHashTable + 4, mc_filter[1]);

	spin_unlock_irqrestore(&tp->lock, flags);
}
762
/* Pulse the soft-reset bit in IntrControl, then quiesce the chip. */
static void sis190_soft_reset(void __iomem *ioaddr)
{
	SIS_W32(IntrControl, 0x8000);
	SIS_PCI_COMMIT();
	msleep(1);
	SIS_W32(IntrControl, 0x0);
	sis190_asic_down(ioaddr);
	msleep(1);
}
772
/*
 * Full hardware bring-up: reset, program ring base addresses and MAC
 * defaults, restore the Rx filter, unmask interrupts and start both
 * DMA engines, then open the transmit queue.
 */
static void sis190_hw_start(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	sis190_soft_reset(ioaddr);

	SIS_W32(TxDescStartAddr, tp->tx_dma);
	SIS_W32(RxDescStartAddr, tp->rx_dma);

	SIS_W32(IntrStatus, 0xffffffff);
	SIS_W32(IntrMask, 0x0);
	/*
	 * Default is 100Mbps.
	 * A bit strange: 100Mbps is 0x1801 elsewhere -- FR 2005/06/09
	 */
	SIS_W16(StationControl, 0x1901);
	SIS_W32(GMIIControl, 0x0);
	SIS_W32(TxMacControl, 0x60);
	SIS_W16(RxMacControl, 0x02);
	SIS_W32(RxHashTable, 0x0);
	SIS_W32(0x6c, 0x0);	/* undocumented register, cleared to be safe */
	SIS_W32(RxWolCtrl, 0x0);
	SIS_W32(RxWolData, 0x0);

	SIS_PCI_COMMIT();

	sis190_set_rx_mode(dev);

	/* Enable all known interrupts by setting the interrupt mask. */
	SIS_W32(IntrMask, sis190_intr_mask);

	SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
	SIS_W32(RxControl, 0x1a1d);

	netif_start_queue(dev);
}
810
/*
 * Deferred link management (scheduled from the irq handler and the PHY
 * timer): wait out a pending PHY reset, kick a reset while the link is
 * down, or read the negotiated mode and program StationControl.
 * Runs in process context under the RTNL.
 */
static void sis190_phy_task(void * data)
{
	struct net_device *dev = data;
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u16 val;

	rtnl_lock();

	val = mdio_read(ioaddr, MII_BMCR);
	if (val & BMCR_RESET) {
		// FIXME: needlessly high ? -- FR 02/07/2005
		mod_timer(&tp->timer, jiffies + HZ/10);
	} else if (!(mdio_read(ioaddr, MII_BMSR) & BMSR_ANEGCOMPLETE)) {
		net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n",
			 dev->name);
		mdio_write(ioaddr, MII_BMCR, val | BMCR_RESET);
		mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
	} else {
		/* Rejoice ! */
		/* Link partner ability -> StationControl value, best first. */
		struct {
			int val;
			const char *msg;
			u16 ctl;
		} reg31[] = {
			{ LPA_1000XFULL | LPA_SLCT,
			  "1000 Mbps Full Duplex",
			  0x01 | _1000bpsF },
			{ LPA_1000XHALF | LPA_SLCT,
			  "1000 Mbps Half Duplex",
			  0x01 | _1000bpsH },
			{ LPA_100FULL,
			  "100 Mbps Full Duplex",
			  0x01 | _100bpsF },
			{ LPA_100HALF,
			  "100 Mbps Half Duplex",
			  0x01 | _100bpsH },
			{ LPA_10FULL,
			  "10 Mbps Full Duplex",
			  0x01 | _10bpsF },
			{ LPA_10HALF,
			  "10 Mbps Half Duplex",
			  0x01 | _10bpsH },
			{ 0, "unknown", 0x0000 }	/* sentinel */
		}, *p;

		val = mdio_read(ioaddr, 0x1f);
		net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);

		val = mdio_read(ioaddr, MII_LPA);
		net_link(tp, KERN_INFO "%s: mii lpa = %04x.\n", dev->name, val);

		for (p = reg31; p->ctl; p++) {
			if ((val & p->val) == p->val)
				break;
		}
		if (p->ctl)
			SIS_W16(StationControl, p->ctl);
		net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
			 p->msg);
		netif_carrier_on(dev);
	}

	rtnl_unlock();
}
876
/* Periodic PHY poll: reschedule the link task while the device is up. */
static void sis190_phy_timer(unsigned long __opaque)
{
	struct net_device *dev = (struct net_device *)__opaque;
	struct sis190_private *tp = netdev_priv(dev);

	if (likely(netif_running(dev)))
		schedule_work(&tp->phy_task);
}
885
/* Stop the PHY poll timer, waiting out a concurrently running handler. */
static inline void sis190_delete_timer(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	del_timer_sync(&tp->timer);
}
892
/* Arm the PHY poll timer to fire SIS190_PHY_TIMEOUT from now. */
static inline void sis190_request_timer(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct timer_list *timer = &tp->timer;

	init_timer(timer);
	timer->expires = jiffies + SIS190_PHY_TIMEOUT;
	timer->data = (unsigned long)dev;
	timer->function = sis190_phy_timer;
	add_timer(timer);
}
904
/* Derive the Rx buffer size from the MTU, with RX_BUF_SIZE as the floor. */
static void sis190_set_rxbufsize(struct sis190_private *tp,
				 struct net_device *dev)
{
	unsigned int mtu = dev->mtu;

	tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
}
912
/*
 * net_device open(): allocate both descriptor rings, populate the Rx
 * ring, start the PHY timer, grab the (shared) irq and bring the
 * hardware up. Unwinds everything on failure (goto-cleanup chain).
 */
static int sis190_open(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	int rc = -ENOMEM;

	sis190_set_rxbufsize(tp, dev);

	/*
	 * Rx and Tx descriptors need 256 bytes alignment.
	 * pci_alloc_consistent() guarantees a stronger alignment.
	 */
	tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
	if (!tp->TxDescRing)
		goto out;

	tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
	if (!tp->RxDescRing)
		goto err_free_tx_0;

	rc = sis190_init_ring(dev);
	if (rc < 0)
		goto err_free_rx_1;

	INIT_WORK(&tp->phy_task, sis190_phy_task, dev);

	sis190_request_timer(dev);

	rc = request_irq(dev->irq, sis190_interrupt, SA_SHIRQ, dev->name, dev);
	if (rc < 0)
		goto err_release_timer_2;

	sis190_hw_start(dev);
out:
	return rc;

err_release_timer_2:
	sis190_delete_timer(dev);
	sis190_rx_clear(tp);
err_free_rx_1:
	pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
		tp->rx_dma);
err_free_tx_0:
	pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
		tp->tx_dma);
	goto out;
}
960
/* Drop every in-flight Tx skb (counted as dropped) and reset indices. */
static void sis190_tx_clear(struct sis190_private *tp)
{
	unsigned int i;

	for (i = 0; i < NUM_TX_DESC; i++) {
		struct sk_buff *skb = tp->Tx_skbuff[i];

		if (!skb)
			continue;

		sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
		tp->Tx_skbuff[i] = NULL;
		dev_kfree_skb(skb);

		tp->stats.tx_dropped++;
	}
	tp->cur_tx = tp->dirty_tx = 0;
}
979
/*
 * Tear the interface down: stop the PHY timer and queue, flush the phy
 * work, then repeatedly quiesce the chip until no interrupt source is
 * left armed (a shared-irq handler may re-enable state in between).
 * Finally release all ring buffers.
 */
static void sis190_down(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int poll_locked = 0;

	sis190_delete_timer(dev);

	netif_stop_queue(dev);

	flush_scheduled_work();

	do {
		spin_lock_irq(&tp->lock);

		sis190_asic_down(ioaddr);

		spin_unlock_irq(&tp->lock);

		synchronize_irq(dev->irq);

		if (!poll_locked) {
			netif_poll_disable(dev);
			poll_locked++;
		}

		synchronize_sched();

	} while (SIS_R32(IntrMask));

	sis190_tx_clear(tp);
	sis190_rx_clear(tp);
}
1013
/* net_device stop(): shut the hardware down and free irq and rings. */
static int sis190_close(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	sis190_down(dev);

	free_irq(dev->irq, dev);

	netif_poll_enable(dev);

	pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
	pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);

	tp->TxDescRing = NULL;
	tp->RxDescRing = NULL;

	return 0;
}
1033
/*
 * hard_start_xmit: pad short frames to ETH_ZLEN, map the skb, fill the
 * next Tx descriptor, hand ownership to the NIC and kick the Tx DMA.
 * Stops the queue when the descriptor it needs is still hardware-owned
 * (NETDEV_TX_BUSY) or when this submission fills the ring.
 */
static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 len, entry, dirty_tx;
	struct TxDesc *desc;
	dma_addr_t mapping;

	if (unlikely(skb->len < ETH_ZLEN)) {
		skb = skb_padto(skb, ETH_ZLEN);
		if (!skb) {
			tp->stats.tx_dropped++;
			goto out;
		}
		len = ETH_ZLEN;
	} else {
		len = skb->len;
	}

	entry = tp->cur_tx % NUM_TX_DESC;
	desc = tp->TxDescRing + entry;

	/* Ring full although the queue was awake: should not happen. */
	if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
		netif_stop_queue(dev);
		net_tx_err(tp, KERN_ERR PFX
			   "%s: BUG! Tx Ring full when queue awake!\n",
			   dev->name);
		return NETDEV_TX_BUSY;
	}

	mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	tp->Tx_skbuff[entry] = skb;

	desc->PSize = cpu_to_le32(len);
	desc->addr = cpu_to_le32(mapping);

	desc->size = cpu_to_le32(len);
	if (entry == (NUM_TX_DESC - 1))
		desc->size |= cpu_to_le32(RingEnd);

	/* Descriptor fields must be visible before ownership transfer. */
	wmb();

	desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);

	tp->cur_tx++;

	smp_wmb();	/* pairs with smp_rmb() in sis190_tx_interrupt */

	SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);

	dev->trans_start = jiffies;

	dirty_tx = tp->dirty_tx;
	if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
		netif_stop_queue(dev);
		smp_rmb();
		/* Completion raced in after the stop: reopen the queue. */
		if (dirty_tx != tp->dirty_tx)
			netif_wake_queue(dev);
	}
out:
	return NETDEV_TX_OK;
}
1097
/* Return the software-maintained interface statistics. */
static struct net_device_stats *sis190_get_stats(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	return &tp->stats;
}
1104
/* Undo sis190_init_board: unmap registers, release PCI, free netdev. */
static void sis190_release_board(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct sis190_private *tp = netdev_priv(dev);

	iounmap(tp->mmio_addr);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}
1115
/*
 * Probe-time board setup: allocate the net_device, enable and claim the
 * PCI device, validate and map BAR 0, hook up the mii library and leave
 * the chip quiesced/reset. Returns the net_device or an ERR_PTR; all
 * acquired resources are unwound on failure.
 */
static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
{
	struct sis190_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int rc;

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
		rc = -ENOMEM;
		goto err_out_0;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);

	rc = pci_enable_device(pdev);
	if (rc < 0) {
		net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
		goto err_free_dev_1;
	}

	rc = -ENODEV;

	/* BAR 0 must be a memory resource large enough for our window. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}
	if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
		net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc < 0) {
		net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}

	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc < 0) {
		net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
			  pci_name(pdev));
		goto err_free_res_3;
	}

	pci_set_master(pdev);

	ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
	if (!ioaddr) {
		net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
			  pci_name(pdev));
		rc = -EIO;
		goto err_free_res_3;
	}

	tp->pci_dev = pdev;
	tp->mmio_addr = ioaddr;

	/* Generic MII library hookup (PHY id still to be determined). */
	tp->mii_if.dev = dev;
	tp->mii_if.mdio_read = __mdio_read;
	tp->mii_if.mdio_write = __mdio_write;
	// tp->mii_if.phy_id = XXX;
	tp->mii_if.phy_id_mask = 0x1f;
	tp->mii_if.reg_num_mask = 0x1f;

	sis190_irq_mask_and_ack(ioaddr);

	sis190_soft_reset(ioaddr);
out:
	return dev;

err_free_res_3:
	pci_release_regions(pdev);
err_pci_disable_2:
	pci_disable_device(pdev);
err_free_dev_1:
	free_netdev(dev);
err_out_0:
	dev = ERR_PTR(rc);
	goto out;
}
1205
1206static void sis190_tx_timeout(struct net_device *dev)
1207{
1208 struct sis190_private *tp = netdev_priv(dev);
1209 void __iomem *ioaddr = tp->mmio_addr;
1210 u8 tmp8;
1211
1212 /* Disable Tx, if not already */
1213 tmp8 = SIS_R8(TxControl);
1214 if (tmp8 & CmdTxEnb)
1215 SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
1216
188f23ba
FR
1217
1218 net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n",
1219 dev->name, SIS_R32(TxControl), SIS_R32(TxSts));
1220
890e8d0a
FR
1221 /* Disable interrupts by clearing the interrupt mask. */
1222 SIS_W32(IntrMask, 0x0000);
1223
1224 /* Stop a shared interrupt from scavenging while we are. */
1225 spin_lock_irq(&tp->lock);
1226 sis190_tx_clear(tp);
1227 spin_unlock_irq(&tp->lock);
1228
1229 /* ...and finally, reset everything. */
1230 sis190_hw_start(dev);
1231
1232 netif_wake_queue(dev);
1233}
1234
1235static void sis190_set_speed_auto(struct net_device *dev)
1236{
1237 struct sis190_private *tp = netdev_priv(dev);
1238 void __iomem *ioaddr = tp->mmio_addr;
1239 int val;
1240
1241 net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);
1242
1243 val = mdio_read(ioaddr, MII_ADVERTISE);
1244
1245 // Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
1246 // unchanged.
1247 mdio_write(ioaddr, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
1248 ADVERTISE_100FULL | ADVERTISE_10FULL |
1249 ADVERTISE_100HALF | ADVERTISE_10HALF);
1250
1251 // Enable 1000 Full Mode.
1252 mdio_write(ioaddr, MII_CTRL1000, ADVERTISE_1000FULL);
1253
1254 // Enable auto-negotiation and restart auto-negotiation.
1255 mdio_write(ioaddr, MII_BMCR,
1256 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
1257}
1258
1259static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1260{
1261 struct sis190_private *tp = netdev_priv(dev);
1262
1263 return mii_ethtool_gset(&tp->mii_if, cmd);
1264}
1265
1266static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1267{
1268 struct sis190_private *tp = netdev_priv(dev);
1269
1270 return mii_ethtool_sset(&tp->mii_if, cmd);
1271}
1272
1273static void sis190_get_drvinfo(struct net_device *dev,
1274 struct ethtool_drvinfo *info)
1275{
1276 struct sis190_private *tp = netdev_priv(dev);
1277
1278 strcpy(info->driver, DRV_NAME);
1279 strcpy(info->version, DRV_VERSION);
1280 strcpy(info->bus_info, pci_name(tp->pci_dev));
1281}
1282
/* ethtool: size of the register dump returned by sis190_get_regs(). */
static int sis190_get_regs_len(struct net_device *dev)
{
	return SIS190_REGS_SIZE;
}
1287
1288static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1289 void *p)
1290{
1291 struct sis190_private *tp = netdev_priv(dev);
1292 unsigned long flags;
1293
1294 if (regs->len > SIS190_REGS_SIZE)
1295 regs->len = SIS190_REGS_SIZE;
1296
1297 spin_lock_irqsave(&tp->lock, flags);
1298 memcpy_fromio(p, tp->mmio_addr, regs->len);
1299 spin_unlock_irqrestore(&tp->lock, flags);
1300}
1301
1302static int sis190_nway_reset(struct net_device *dev)
1303{
1304 struct sis190_private *tp = netdev_priv(dev);
1305
1306 return mii_nway_restart(&tp->mii_if);
1307}
1308
1309static u32 sis190_get_msglevel(struct net_device *dev)
1310{
1311 struct sis190_private *tp = netdev_priv(dev);
1312
1313 return tp->msg_enable;
1314}
1315
1316static void sis190_set_msglevel(struct net_device *dev, u32 value)
1317{
1318 struct sis190_private *tp = netdev_priv(dev);
1319
1320 tp->msg_enable = value;
1321}
1322
/* ethtool operations exported through SET_ETHTOOL_OPS() in the probe. */
static struct ethtool_ops sis190_ethtool_ops = {
	.get_settings	= sis190_get_settings,
	.set_settings	= sis190_set_settings,
	.get_drvinfo	= sis190_get_drvinfo,
	.get_regs_len	= sis190_get_regs_len,
	.get_regs	= sis190_get_regs,
	.get_link	= ethtool_op_get_link,
	.get_msglevel	= sis190_get_msglevel,
	.set_msglevel	= sis190_set_msglevel,
	.nway_reset	= sis190_nway_reset,
};
1334
1335static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1336{
1337 struct sis190_private *tp = netdev_priv(dev);
1338
1339 return !netif_running(dev) ? -EINVAL :
1340 generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
1341}
1342
/*
 * PCI probe: bring up the board, read the MAC address, wire up the
 * net_device methods and register the interface.
 * Returns 0 on success or a negative errno.
 */
static int __devinit sis190_init_one(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct sis190_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int i, rc;

	/* Announce the driver once, however many adapters are probed. */
	if (!printed_version) {
		net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
		printed_version = 1;
	}

	/* Enable the PCI device, map the registers, reset the chip. */
	dev = sis190_init_board(pdev);
	if (IS_ERR(dev)) {
		rc = PTR_ERR(dev);
		goto out;
	}

	tp = netdev_priv(dev);
	ioaddr = tp->mmio_addr;

	/*
	 * Get the MAC address: when an EEPROM is present (ROMControl
	 * bit 2 set), read the node address from the EEPROM into the
	 * RxMacAddr filter registers, then fetch it back byte by byte.
	 */
	if (SIS_R32(ROMControl) & 0x4) {
		for (i = 0; i < 3; i++) {
			SIS_W16(RxMacAddr + 2*i,
				sis190_read_eeprom(ioaddr, 3 + i));
		}
	}

	for (i = 0; i < MAC_ADDR_LEN; i++)
		dev->dev_addr[i] = SIS_R8(RxMacAddr + i);

	/* Deferred PHY handling runs from a workqueue. */
	INIT_WORK(&tp->phy_task, sis190_phy_task, dev);

	/* Wire up the net_device methods. */
	dev->open = sis190_open;
	dev->stop = sis190_close;
	dev->do_ioctl = sis190_ioctl;
	dev->get_stats = sis190_get_stats;
	dev->tx_timeout = sis190_tx_timeout;
	dev->watchdog_timeo = SIS190_TX_TIMEOUT;
	dev->hard_start_xmit = sis190_start_xmit;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = sis190_netpoll;
#endif
	dev->set_multicast_list = sis190_set_rx_mode;
	SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
	dev->irq = pdev->irq;
	dev->base_addr = (unsigned long) 0xdead;

	spin_lock_init(&tp->lock);
	rc = register_netdev(dev);
	if (rc < 0) {
		sis190_release_board(pdev);
		goto out;
	}

	pci_set_drvdata(pdev, dev);

	net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), "
	       "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
	       pci_name(pdev), sis_chip_info[ent->driver_data].name,
	       ioaddr, dev->irq,
	       dev->dev_addr[0], dev->dev_addr[1],
	       dev->dev_addr[2], dev->dev_addr[3],
	       dev->dev_addr[4], dev->dev_addr[5]);

	/* No link yet; autonegotiation will raise the carrier later. */
	netif_carrier_off(dev);

	sis190_set_speed_auto(dev);
out:
	return rc;
}
1419
/* PCI remove: tear down in the reverse order of sis190_init_one(). */
static void __devexit sis190_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	unregister_netdev(dev);
	sis190_release_board(pdev);
	pci_set_drvdata(pdev, NULL);
}
1428
/* PCI driver glue: matches sis190_pci_tbl and binds probe/remove. */
static struct pci_driver sis190_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= sis190_pci_tbl,
	.probe		= sis190_init_one,
	.remove		= __devexit_p(sis190_remove_one),
};
1435
1436static int __init sis190_init_module(void)
1437{
1438 return pci_module_init(&sis190_pci_driver);
1439}
1440
/* Module exit: unregister the driver; the PCI core unbinds devices. */
static void __exit sis190_cleanup_module(void)
{
	pci_unregister_driver(&sis190_pci_driver);
}
1445
/* Module entry and exit points. */
module_init(sis190_init_module);
module_exit(sis190_cleanup_module);