[PATCH] sis190: remove hardcoded constants.
[GitHub/LineageOS/android_kernel_motorola_exynos9610.git] / drivers / net / sis190.c
CommitLineData
890e8d0a
FR
1/*
2 sis190.c: Silicon Integrated Systems SiS190 ethernet driver
3
4 Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
5 Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
6 Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
7
40292fb0
FR
8 Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
9 genuine driver.
890e8d0a
FR
10
11 This software may be used and distributed according to the terms of
12 the GNU General Public License (GPL), incorporated herein by reference.
13 Drivers based on or derived from this code fall under the GPL and must
14 retain the authorship, copyright and license notice. This file is not
15 a complete program and may only be used when the entire operating
16 system is licensed under the GPL.
17
18 See the file COPYING in this distribution for more information.
19
20 */
21
22#include <linux/module.h>
23#include <linux/moduleparam.h>
24#include <linux/netdevice.h>
43afb949 25#include <linux/rtnetlink.h>
890e8d0a
FR
26#include <linux/etherdevice.h>
27#include <linux/ethtool.h>
28#include <linux/pci.h>
29#include <linux/mii.h>
30#include <linux/delay.h>
31#include <linux/crc32.h>
32#include <linux/dma-mapping.h>
33#include <asm/irq.h>
34
35#define net_drv(p, arg...) if (netif_msg_drv(p)) \
36 printk(arg)
37#define net_probe(p, arg...) if (netif_msg_probe(p)) \
38 printk(arg)
39#define net_link(p, arg...) if (netif_msg_link(p)) \
40 printk(arg)
41#define net_intr(p, arg...) if (netif_msg_intr(p)) \
42 printk(arg)
43#define net_tx_err(p, arg...) if (netif_msg_tx_err(p)) \
44 printk(arg)
45
46#ifdef CONFIG_SIS190_NAPI
47#define NAPI_SUFFIX "-NAPI"
48#else
49#define NAPI_SUFFIX ""
50#endif
51
52#define DRV_VERSION "1.2" NAPI_SUFFIX
53#define DRV_NAME "sis190"
54#define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
55#define PFX DRV_NAME ": "
56
57#ifdef CONFIG_SIS190_NAPI
58#define sis190_rx_skb netif_receive_skb
59#define sis190_rx_quota(count, quota) min(count, quota)
60#else
61#define sis190_rx_skb netif_rx
62#define sis190_rx_quota(count, quota) count
63#endif
64
65#define MAC_ADDR_LEN 6
66
67#define NUM_TX_DESC 64
68#define NUM_RX_DESC 64
69#define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
70#define RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
71#define RX_BUF_SIZE 1536
72
73#define SIS190_REGS_SIZE 0x80
74#define SIS190_TX_TIMEOUT (6*HZ)
75#define SIS190_PHY_TIMEOUT (10*HZ)
76#define SIS190_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
77 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
78 NETIF_MSG_IFDOWN)
79
80/* Enhanced PHY access register bit definitions */
81#define EhnMIIread 0x0000
82#define EhnMIIwrite 0x0020
83#define EhnMIIdataShift 16
84#define EhnMIIpmdShift 6 /* 7016 only */
85#define EhnMIIregShift 11
86#define EhnMIIreq 0x0010
87#define EhnMIInotDone 0x0010
88
89/* Write/read MMIO register */
90#define SIS_W8(reg, val) writeb ((val), ioaddr + (reg))
91#define SIS_W16(reg, val) writew ((val), ioaddr + (reg))
92#define SIS_W32(reg, val) writel ((val), ioaddr + (reg))
93#define SIS_R8(reg) readb (ioaddr + (reg))
94#define SIS_R16(reg) readw (ioaddr + (reg))
95#define SIS_R32(reg) readl (ioaddr + (reg))
96
97#define SIS_PCI_COMMIT() SIS_R32(IntrControl)
98
/* MMIO register offsets inside the SIS190_REGS_SIZE (0x80) window. */
enum sis190_registers {
	TxControl		= 0x00,
	TxDescStartAddr		= 0x04,
	rsv0			= 0x08,	// reserved
	TxSts			= 0x0c,	// unused (Control/Status)
	RxControl		= 0x10,
	RxDescStartAddr		= 0x14,
	rsv1			= 0x18,	// reserved
	RxSts			= 0x1c,	// unused
	IntrStatus		= 0x20,
	IntrMask		= 0x24,
	IntrControl		= 0x28,
	IntrTimer		= 0x2c,	// unused (Interupt Timer)
	PMControl		= 0x30,	// unused (Power Mgmt Control/Status)
	rsv2			= 0x34,	// reserved
	ROMControl		= 0x38,
	ROMInterface		= 0x3c,
	StationControl		= 0x40,
	GMIIControl		= 0x44,
	GIoCR			= 0x48,	// unused (GMAC IO Compensation)
	GIoCtrl			= 0x4c,	// unused (GMAC IO Control)
	TxMacControl		= 0x50,
	TxLimit			= 0x54,	// unused (Tx MAC Timer/TryLimit)
	RGDelay			= 0x58,	// unused (RGMII Tx Internal Delay)
	rsv3			= 0x5c,	// reserved
	RxMacControl		= 0x60,
	RxMacAddr		= 0x62,
	RxHashTable		= 0x68,
	// Undocumented		= 0x6c,
	RxWolCtrl		= 0x70,
	RxWolData		= 0x74,	// unused (Rx WOL Data Access)
	RxMPSControl		= 0x78,	// unused (Rx MPS Control)
	rsv4			= 0x7c,	// reserved
};
133
/* Bit definitions for the registers above and for the Rx status word. */
enum sis190_register_content {
	/* IntrStatus */
	SoftInt			= 0x40000000,	// unused
	Timeup			= 0x20000000,	// unused
	PauseFrame		= 0x00080000,	// unused
	MagicPacket		= 0x00040000,	// unused
	WakeupFrame		= 0x00020000,	// unused
	LinkChange		= 0x00010000,
	RxQEmpty		= 0x00000080,
	RxQInt			= 0x00000040,
	TxQ1Empty		= 0x00000020,	// unused
	TxQ1Int			= 0x00000010,
	TxQ0Empty		= 0x00000008,	// unused
	TxQ0Int			= 0x00000004,
	RxHalt			= 0x00000002,
	TxHalt			= 0x00000001,

	/* RxStatusDesc */
	RxRES			= 0x00200000,	// unused
	RxCRC			= 0x00080000,
	RxRUNT			= 0x00100000,	// unused
	RxRWT			= 0x00400000,	// unused

	/* {Rx/Tx}CmdBits */
	CmdReset		= 0x10,
	CmdRxEnb		= 0x08,		// unused
	CmdTxEnb		= 0x01,
	RxBufEmpty		= 0x01,		// unused

	/* Cfg9346Bits */
	Cfg9346_Lock		= 0x00,		// unused
	Cfg9346_Unlock		= 0xc0,		// unused

	/* RxMacControl */
	AcceptErr		= 0x20,		// unused
	AcceptRunt		= 0x10,		// unused
	AcceptBroadcast		= 0x0800,
	AcceptMulticast		= 0x0400,
	AcceptMyPhys		= 0x0200,
	AcceptAllPhys		= 0x0100,

	/* RxConfigBits */
	RxCfgFIFOShift		= 13,
	RxCfgDMAShift		= 8,		// 0x1a in RxControl ?

	/* TxConfigBits */
	TxInterFrameGapShift	= 24,
	TxDMAShift		= 8,	/* DMA burst value (0-7) is shift this many bits */

	/* StationControl */
	_1000bpsF		= 0x1c00,
	_1000bpsH		= 0x0c00,
	_100bpsF		= 0x1800,
	_100bpsH		= 0x0800,
	_10bpsF			= 0x1400,
	_10bpsH			= 0x0400,

	LinkStatus		= 0x02,		// unused
	FullDup			= 0x01,		// unused

	/* TBICSRBit */
	TBILinkOK		= 0x02000000,	// unused
};
197
/* Hardware Tx descriptor; all fields are little-endian on the wire. */
struct TxDesc {
	u32 PSize;	/* packet size */
	u32 status;	/* OWNbit/INTbit/... see enum _DescStatusBit */
	u32 addr;	/* DMA address of the data buffer */
	u32 size;	/* buffer size; RingEnd flag lives in bit 31 */
};
204
/* Hardware Rx descriptor; same layout as TxDesc, little-endian. */
struct RxDesc {
	u32 PSize;	/* frame status + length (RxSizeMask) */
	u32 status;	/* OWNbit/INTbit/... see enum _DescStatusBit */
	u32 addr;	/* DMA address of the receive buffer */
	u32 size;	/* buffer size; RingEnd flag lives in bit 31 */
};
211
/* Bits in the Rx/Tx descriptor words. */
enum _DescStatusBit {
	/* _Desc.status */
	OWNbit		= 0x80000000,	/* set: descriptor belongs to the chip */
	INTbit		= 0x40000000,
	DEFbit		= 0x00200000,
	CRCbit		= 0x00020000,
	PADbit		= 0x00010000,
	/* _Desc.size */
	RingEnd		= (1 << 31),	/* marks the last descriptor of a ring */
	/* _Desc.PSize */
	RxSizeMask	= 0x0000ffff	/* received frame length field */
};
224
40292fb0
FR
/* ROMInterface register bits used for EEPROM access. */
enum sis190_eeprom_access_register_bits {
	EECS	= 0x00000001,	// unused
	EECLK	= 0x00000002,	// unused
	EEDO	= 0x00000008,	// unused
	EEDI	= 0x00000004,	// unused
	EEREQ	= 0x00000080,	/* request access; chip clears it when done */
	EEROP	= 0x00000200,	/* read operation */
	EEWOP	= 0x00000100	// unused
};
234
890e8d0a
FR
/* Per-adapter state, stored in netdev_priv(). */
struct sis190_private {
	void __iomem *mmio_addr;	/* mapped BAR 0 registers */
	struct pci_dev *pci_dev;
	struct net_device_stats stats;
	spinlock_t lock;		/* serializes Rx filter / teardown register writes */
	u32 rx_buf_sz;			/* current Rx buffer size (MTU dependent) */
	u32 cur_rx;			/* next Rx descriptor to examine */
	u32 cur_tx;			/* next free Tx descriptor */
	u32 dirty_rx;			/* first Rx slot lacking a buffer */
	u32 dirty_tx;			/* first Tx slot not yet reclaimed */
	dma_addr_t rx_dma;		/* bus address of RxDescRing */
	dma_addr_t tx_dma;		/* bus address of TxDescRing */
	struct RxDesc *RxDescRing;
	struct TxDesc *TxDescRing;
	struct sk_buff *Rx_skbuff[NUM_RX_DESC];
	struct sk_buff *Tx_skbuff[NUM_TX_DESC];
	struct work_struct phy_task;	/* deferred PHY/link handling */
	struct timer_list timer;	/* periodic PHY poll */
	u32 msg_enable;			/* netif_msg_* verbosity bitmask */
	struct mii_if_info mii_if;
};
256
257const static struct {
258 const char *name;
259 u8 version; /* depend on docs */
260 u32 RxConfigMask; /* clear the bits supported by this chip */
261} sis_chip_info[] = {
262 { DRV_NAME, 0x00, 0xff7e1880, },
263};
264
/* PCI IDs bound by this driver: SiS device 0x0190, zero-terminated. */
static struct pci_device_id sis190_pci_tbl[] __devinitdata = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
	{ 0, },
};
269
270MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
271
272static int rx_copybreak = 200;
273
274static struct {
275 u32 msg_enable;
276} debug = { -1 };
277
278MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver");
279module_param(rx_copybreak, int, 0);
280MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
281module_param_named(debug, debug.msg_enable, int, 0);
282MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
283MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
284MODULE_VERSION(DRV_VERSION);
285MODULE_LICENSE("GPL");
286
287static const u32 sis190_intr_mask =
288 RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt;
289
290/*
291 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
292 * The chips use a 64 element hash table based on the Ethernet CRC.
293 */
294static int multicast_filter_limit = 32;
295
296static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
297{
298 unsigned int i;
299
300 SIS_W32(GMIIControl, ctl);
301
302 msleep(1);
303
304 for (i = 0; i < 100; i++) {
305 if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
306 break;
307 msleep(1);
308 }
309
310 if (i > 999)
311 printk(KERN_ERR PFX "PHY command failed !\n");
312}
313
314static void mdio_write(void __iomem *ioaddr, int reg, int val)
315{
316 u32 pmd = 1;
317
318 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
319 (((u32) reg) << EhnMIIregShift) | (pmd << EhnMIIpmdShift) |
320 (((u32) val) << EhnMIIdataShift));
321}
322
323static int mdio_read(void __iomem *ioaddr, int reg)
324{
325 u32 pmd = 1;
326
327 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
328 (((u32) reg) << EhnMIIregShift) | (pmd << EhnMIIpmdShift));
329
330 return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
331}
332
43afb949
FR
333static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
334{
335 struct sis190_private *tp = netdev_priv(dev);
336
337 mdio_write(tp->mmio_addr, reg, val);
338}
339
340static int __mdio_read(struct net_device *dev, int phy_id, int reg)
341{
342 struct sis190_private *tp = netdev_priv(dev);
343
344 return mdio_read(tp->mmio_addr, reg);
345}
346
40292fb0 347static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
890e8d0a 348{
40292fb0 349 u16 data = 0xffff;
890e8d0a 350 unsigned int i;
890e8d0a
FR
351
352 if (!(SIS_R32(ROMControl) & 0x0002))
353 return 0;
354
40292fb0 355 SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));
890e8d0a
FR
356
357 for (i = 0; i < 200; i++) {
40292fb0
FR
358 if (!(SIS_R32(ROMInterface) & EEREQ)) {
359 data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
890e8d0a 360 break;
40292fb0 361 }
890e8d0a
FR
362 msleep(1);
363 }
364
890e8d0a
FR
365 return data;
366}
367
/*
 * Mask every interrupt source and acknowledge anything already pending;
 * the read in SIS_PCI_COMMIT() flushes the posted PCI writes.
 */
static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
{
	SIS_W32(IntrMask, 0x00);
	SIS_W32(IntrStatus, 0xffffffff);
	SIS_PCI_COMMIT();
}
374
/* Halt the chip: stop both DMA engines, then mask and ack interrupts. */
static void sis190_asic_down(void __iomem *ioaddr)
{
	/* Stop the chip's Tx and Rx DMA processes. */

	SIS_W32(TxControl, 0x1a00);
	SIS_W32(RxControl, 0x1a00);

	sis190_irq_mask_and_ack(ioaddr);
}
384
/* Set RingEnd so the chip wraps back to the first descriptor after this one. */
static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->size |= cpu_to_le32(RingEnd);
}
389
/*
 * Re-arm an Rx descriptor for the chip: clear the size/status field,
 * restore the buffer size (preserving RingEnd) and, only after a write
 * barrier, hand ownership back to the hardware via OWNbit.
 */
static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->size) & RingEnd;

	desc->PSize = 0x0;
	desc->size = cpu_to_le32(rx_buf_sz | eor);
	wmb();
	desc->status = cpu_to_le32(OWNbit | INTbit);
}
399
/* Plug a freshly mapped DMA buffer into @desc and give it to the chip. */
static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				      u32 rx_buf_sz)
{
	desc->addr = cpu_to_le32(mapping);
	sis190_give_to_asic(desc, rx_buf_sz);
}
406
/*
 * Detach a descriptor from the hardware: poison the address, keep only
 * the RingEnd bit in size, and clear the status word last (after a
 * barrier) so the chip never sees a half-updated descriptor.
 */
static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->PSize = 0x0;
	desc->addr = 0xdeadbeef;
	desc->size &= cpu_to_le32(RingEnd);
	wmb();
	desc->status = 0x0;
}
415
/*
 * Allocate and DMA-map a receive buffer for @desc.
 * Returns 0 on success or -ENOMEM; on failure the descriptor is made
 * unusable so the chip skips it.
 */
static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
			       struct RxDesc *desc, u32 rx_buf_sz)
{
	struct sk_buff *skb;
	dma_addr_t mapping;
	int ret = 0;

	skb = dev_alloc_skb(rx_buf_sz);
	if (!skb)
		goto err_out;

	*sk_buff = skb;

	mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
				 PCI_DMA_FROMDEVICE);

	sis190_map_to_asic(desc, mapping, rx_buf_sz);
out:
	return ret;

err_out:
	ret = -ENOMEM;
	sis190_make_unusable_by_asic(desc);
	goto out;
}
441
442static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
443 u32 start, u32 end)
444{
445 u32 cur;
446
447 for (cur = start; cur < end; cur++) {
448 int ret, i = cur % NUM_RX_DESC;
449
450 if (tp->Rx_skbuff[i])
451 continue;
452
453 ret = sis190_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
454 tp->RxDescRing + i, tp->rx_buf_sz);
455 if (ret < 0)
456 break;
457 }
458 return cur - start;
459}
460
/*
 * For small frames (< rx_copybreak) copy the data into a fresh skb and
 * recycle the original buffer in place.  Returns 0 when the copy was
 * done (*sk_buff then points at the new skb), -1 otherwise.
 */
static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
				     struct RxDesc *desc, int rx_buf_sz)
{
	int ret = -1;

	if (pkt_size < rx_copybreak) {
		struct sk_buff *skb;

		skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
		if (skb) {
			/* Align the IP header on a 16 byte boundary. */
			skb_reserve(skb, NET_IP_ALIGN);
			eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
			*sk_buff = skb;
			sis190_give_to_asic(desc, rx_buf_sz);
			ret = 0;
		}
	}
	return ret;
}
480
481static int sis190_rx_interrupt(struct net_device *dev,
482 struct sis190_private *tp, void __iomem *ioaddr)
483{
484 struct net_device_stats *stats = &tp->stats;
485 u32 rx_left, cur_rx = tp->cur_rx;
486 u32 delta, count;
487
488 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
489 rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);
490
491 for (; rx_left > 0; rx_left--, cur_rx++) {
492 unsigned int entry = cur_rx % NUM_RX_DESC;
493 struct RxDesc *desc = tp->RxDescRing + entry;
494 u32 status;
495
496 if (desc->status & OWNbit)
497 break;
498
499 status = le32_to_cpu(desc->PSize);
500
501 // net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
502 // status);
503
504 if (status & RxCRC) {
505 net_intr(tp, KERN_INFO "%s: bad crc. status = %08x.\n",
506 dev->name, status);
507 stats->rx_errors++;
508 stats->rx_crc_errors++;
509 sis190_give_to_asic(desc, tp->rx_buf_sz);
510 } else if (!(status & PADbit)) {
511 net_intr(tp, KERN_INFO "%s: bad pad. status = %08x.\n",
512 dev->name, status);
513 stats->rx_errors++;
514 stats->rx_length_errors++;
515 sis190_give_to_asic(desc, tp->rx_buf_sz);
516 } else {
517 struct sk_buff *skb = tp->Rx_skbuff[entry];
518 int pkt_size = (status & RxSizeMask) - 4;
519 void (*pci_action)(struct pci_dev *, dma_addr_t,
520 size_t, int) = pci_dma_sync_single_for_device;
521
522 if (unlikely(pkt_size > tp->rx_buf_sz)) {
523 net_intr(tp, KERN_INFO
524 "%s: (frag) status = %08x.\n",
525 dev->name, status);
526 stats->rx_dropped++;
527 stats->rx_length_errors++;
528 sis190_give_to_asic(desc, tp->rx_buf_sz);
529 continue;
530 }
531
532 pci_dma_sync_single_for_cpu(tp->pci_dev,
533 le32_to_cpu(desc->addr), tp->rx_buf_sz,
534 PCI_DMA_FROMDEVICE);
535
536 if (sis190_try_rx_copy(&skb, pkt_size, desc,
537 tp->rx_buf_sz)) {
538 pci_action = pci_unmap_single;
539 tp->Rx_skbuff[entry] = NULL;
540 sis190_make_unusable_by_asic(desc);
541 }
542
543 pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
544 tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
545
546 skb->dev = dev;
547 skb_put(skb, pkt_size);
548 skb->protocol = eth_type_trans(skb, dev);
549
550 sis190_rx_skb(skb);
551
552 dev->last_rx = jiffies;
553 stats->rx_bytes += pkt_size;
554 stats->rx_packets++;
555 }
556 }
557 count = cur_rx - tp->cur_rx;
558 tp->cur_rx = cur_rx;
559
560 delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
561 if (!delta && count && netif_msg_intr(tp))
562 printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
563 tp->dirty_rx += delta;
564
565 if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
566 printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);
567
568 return count;
569}
570
571static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
572 struct TxDesc *desc)
573{
574 unsigned int len;
575
576 len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
577
578 pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
579
580 memset(desc, 0x00, sizeof(*desc));
581}
582
/*
 * Reclaim completed Tx descriptors: update stats, unmap and free the
 * skbs, and wake the queue if it was stopped on a full ring.
 */
static void sis190_tx_interrupt(struct net_device *dev,
				struct sis190_private *tp, void __iomem *ioaddr)
{
	u32 pending, dirty_tx = tp->dirty_tx;
	/*
	 * It would not be needed if queueing was allowed to be enabled
	 * again too early (hint: think preempt and unclocked smp systems).
	 */
	unsigned int queue_stopped;

	smp_rmb();
	pending = tp->cur_tx - dirty_tx;
	/* A completely full ring means sis190_start_xmit stopped the queue. */
	queue_stopped = (pending == NUM_TX_DESC);

	for (; pending; pending--, dirty_tx++) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct TxDesc *txd = tp->TxDescRing + entry;
		struct sk_buff *skb;

		/* Still owned by the chip: nothing more to reclaim. */
		if (le32_to_cpu(txd->status) & OWNbit)
			break;

		skb = tp->Tx_skbuff[entry];

		tp->stats.tx_packets++;
		tp->stats.tx_bytes += skb->len;

		sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
		tp->Tx_skbuff[entry] = NULL;
		dev_kfree_skb_irq(skb);
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		smp_wmb();
		if (queue_stopped)
			netif_wake_queue(dev);
	}
}
622
/*
 * The interrupt handler does all of the Rx thread work and cleans up after
 * the Tx thread.
 */
static irqreturn_t sis190_interrupt(int irq, void *__dev, struct pt_regs *regs)
{
	struct net_device *dev = __dev;
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int handled = 0;
	u32 status;

	status = SIS_R32(IntrStatus);

	/* 0xffffffff: device gone/unreadable; 0: not our (shared) irq. */
	if ((status == 0xffffffff) || !status)
		goto out;

	handled = 1;

	if (unlikely(!netif_running(dev))) {
		sis190_asic_down(ioaddr);
		goto out;
	}

	/* Acknowledge everything we saw before servicing it. */
	SIS_W32(IntrStatus, status);

	// net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);

	if (status & LinkChange) {
		net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
		/* PHY handling may sleep: defer it to process context. */
		schedule_work(&tp->phy_task);
	}

	if (status & RxQInt)
		sis190_rx_interrupt(dev, tp, ioaddr);

	if (status & TxQ0Int)
		sis190_tx_interrupt(dev, tp, ioaddr);
out:
	return IRQ_RETVAL(handled);
}
664
4405d3b5
FR
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling substitute for the interrupt handler, used with interrupts
 * disabled (netconsole and friends).
 */
static void sis190_netpoll(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	disable_irq(pdev->irq);
	sis190_interrupt(pdev->irq, dev, NULL);
	enable_irq(pdev->irq);
}
#endif
676
890e8d0a
FR
677static void sis190_free_rx_skb(struct sis190_private *tp,
678 struct sk_buff **sk_buff, struct RxDesc *desc)
679{
680 struct pci_dev *pdev = tp->pci_dev;
681
682 pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
683 PCI_DMA_FROMDEVICE);
684 dev_kfree_skb(*sk_buff);
685 *sk_buff = NULL;
686 sis190_make_unusable_by_asic(desc);
687}
688
689static void sis190_rx_clear(struct sis190_private *tp)
690{
691 unsigned int i;
692
693 for (i = 0; i < NUM_RX_DESC; i++) {
694 if (!tp->Rx_skbuff[i])
695 continue;
696 sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
697 }
698}
699
700static void sis190_init_ring_indexes(struct sis190_private *tp)
701{
702 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
703}
704
705static int sis190_init_ring(struct net_device *dev)
706{
707 struct sis190_private *tp = netdev_priv(dev);
708
709 sis190_init_ring_indexes(tp);
710
711 memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
712 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
713
714 if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
715 goto err_rx_clear;
716
717 sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
718
719 return 0;
720
721err_rx_clear:
722 sis190_rx_clear(tp);
723 return -ENOMEM;
724}
725
/*
 * Program the Rx filter from dev->flags and the multicast list:
 * promiscuous mode accepts everything, too many groups (or ALLMULTI)
 * opens the hash filter wide, otherwise a 64 entry CRC hash is built.
 */
static void sis190_set_rx_mode(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;
	u32 mc_filter[2];	/* Multicast hash filter */
	u16 rx_mode;

	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		net_drv(tp, KERN_NOTICE "%s: Promiscuous mode enabled.\n",
			dev->name);
		rx_mode =
			AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
			AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((dev->mc_count > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct dev_mc_list *mclist;
		unsigned int i;

		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			/* Top 6 CRC bits index the 64 entry hash table. */
			int bit_nr =
				ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	spin_lock_irqsave(&tp->lock, flags);

	SIS_W16(RxMacControl, rx_mode | 0x2);
	SIS_W32(RxHashTable, mc_filter[0]);
	SIS_W32(RxHashTable + 4, mc_filter[1]);

	spin_unlock_irqrestore(&tp->lock, flags);
}
770
/* Pulse the soft reset bit in IntrControl, then halt the DMA engines. */
static void sis190_soft_reset(void __iomem *ioaddr)
{
	SIS_W32(IntrControl, 0x8000);
	SIS_PCI_COMMIT();
	msleep(1);
	SIS_W32(IntrControl, 0x0);
	sis190_asic_down(ioaddr);
	msleep(1);
}
780
/*
 * Full hardware (re)initialisation: soft reset, program the descriptor
 * ring addresses, set default MAC registers and the Rx filter, then
 * enable interrupts and the Tx/Rx engines.  Rings must already be set up.
 */
static void sis190_hw_start(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	sis190_soft_reset(ioaddr);

	SIS_W32(TxDescStartAddr, tp->tx_dma);
	SIS_W32(RxDescStartAddr, tp->rx_dma);

	SIS_W32(IntrStatus, 0xffffffff);
	SIS_W32(IntrMask, 0x0);
	/*
	 * Default is 100Mbps.
	 * A bit strange: 100Mbps is 0x1801 elsewhere -- FR 2005/06/09
	 */
	SIS_W16(StationControl, 0x1901);
	SIS_W32(GMIIControl, 0x0);
	SIS_W32(TxMacControl, 0x60);
	SIS_W16(RxMacControl, 0x02);
	SIS_W32(RxHashTable, 0x0);
	SIS_W32(0x6c, 0x0);	/* undocumented register, cleared as well */
	SIS_W32(RxWolCtrl, 0x0);
	SIS_W32(RxWolData, 0x0);

	SIS_PCI_COMMIT();

	sis190_set_rx_mode(dev);

	/* Enable all known interrupts by setting the interrupt mask. */
	SIS_W32(IntrMask, sis190_intr_mask);

	SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
	SIS_W32(RxControl, 0x1a1d);

	netif_start_queue(dev);
}
818
/*
 * Deferred PHY handling (process context, may sleep): wait for a PHY
 * reset to complete, retrigger it until autonegotiation finishes, then
 * program StationControl to match the negotiated speed/duplex.
 */
static void sis190_phy_task(void * data)
{
	struct net_device *dev = data;
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u16 val;

	rtnl_lock();

	val = mdio_read(ioaddr, MII_BMCR);
	if (val & BMCR_RESET) {
		/* PHY reset still in progress: poll again shortly. */
		// FIXME: needlessly high ? -- FR 02/07/2005
		mod_timer(&tp->timer, jiffies + HZ/10);
	} else if (!(mdio_read(ioaddr, MII_BMSR) & BMSR_ANEGCOMPLETE)) {
		net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n",
			 dev->name);
		mdio_write(ioaddr, MII_BMCR, val | BMCR_RESET);
		mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
	} else {
		/* Rejoice ! */
		struct {
			int val;
			const char *msg;
			u16 ctl;
		} reg31[] = {
			{ LPA_1000XFULL | LPA_SLCT,
			  "1000 Mbps Full Duplex",
			  0x01 | _1000bpsF },
			{ LPA_1000XHALF | LPA_SLCT,
			  "1000 Mbps Half Duplex",
			  0x01 | _1000bpsH },
			{ LPA_100FULL,
			  "100 Mbps Full Duplex",
			  0x01 | _100bpsF },
			{ LPA_100HALF,
			  "100 Mbps Half Duplex",
			  0x01 | _100bpsH },
			{ LPA_10FULL,
			  "10 Mbps Full Duplex",
			  0x01 | _10bpsF },
			{ LPA_10HALF,
			  "10 Mbps Half Duplex",
			  0x01 | _10bpsH },
			{ 0, "unknown", 0x0000 }
		}, *p;

		val = mdio_read(ioaddr, 0x1f);
		net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);

		val = mdio_read(ioaddr, MII_LPA);
		net_link(tp, KERN_INFO "%s: mii lpa = %04x.\n", dev->name, val);

		/* First entry whose bits are all present in the LPA wins. */
		for (p = reg31; p->ctl; p++) {
			if ((val & p->val) == p->val)
				break;
		}
		if (p->ctl)
			SIS_W16(StationControl, p->ctl);
		net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
			 p->msg);
		netif_carrier_on(dev);
	}

	rtnl_unlock();
}
884
885static void sis190_phy_timer(unsigned long __opaque)
886{
887 struct net_device *dev = (struct net_device *)__opaque;
888 struct sis190_private *tp = netdev_priv(dev);
889
890 if (likely(netif_running(dev)))
891 schedule_work(&tp->phy_task);
892}
893
894static inline void sis190_delete_timer(struct net_device *dev)
895{
896 struct sis190_private *tp = netdev_priv(dev);
897
898 del_timer_sync(&tp->timer);
899}
900
901static inline void sis190_request_timer(struct net_device *dev)
902{
903 struct sis190_private *tp = netdev_priv(dev);
904 struct timer_list *timer = &tp->timer;
905
906 init_timer(timer);
907 timer->expires = jiffies + SIS190_PHY_TIMEOUT;
908 timer->data = (unsigned long)dev;
909 timer->function = sis190_phy_timer;
910 add_timer(timer);
911}
912
913static void sis190_set_rxbufsize(struct sis190_private *tp,
914 struct net_device *dev)
915{
916 unsigned int mtu = dev->mtu;
917
918 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
919}
920
/*
 * net_device open(): allocate the coherent DMA rings, fill the Rx ring,
 * start the PHY timer, install the (shared) interrupt handler and kick
 * the hardware.  Returns 0 or a negative errno; failures unwind fully.
 */
static int sis190_open(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	int rc = -ENOMEM;

	sis190_set_rxbufsize(tp, dev);

	/*
	 * Rx and Tx descriptors need 256 bytes alignment.
	 * pci_alloc_consistent() guarantees a stronger alignment.
	 */
	tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
	if (!tp->TxDescRing)
		goto out;

	tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
	if (!tp->RxDescRing)
		goto err_free_tx_0;

	rc = sis190_init_ring(dev);
	if (rc < 0)
		goto err_free_rx_1;

	INIT_WORK(&tp->phy_task, sis190_phy_task, dev);

	sis190_request_timer(dev);

	rc = request_irq(dev->irq, sis190_interrupt, SA_SHIRQ, dev->name, dev);
	if (rc < 0)
		goto err_release_timer_2;

	sis190_hw_start(dev);
out:
	return rc;

err_release_timer_2:
	sis190_delete_timer(dev);
	sis190_rx_clear(tp);
err_free_rx_1:
	pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
		tp->rx_dma);
err_free_tx_0:
	pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
		tp->tx_dma);
	goto out;
}
968
969static void sis190_tx_clear(struct sis190_private *tp)
970{
971 unsigned int i;
972
973 for (i = 0; i < NUM_TX_DESC; i++) {
974 struct sk_buff *skb = tp->Tx_skbuff[i];
975
976 if (!skb)
977 continue;
978
979 sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
980 tp->Tx_skbuff[i] = NULL;
981 dev_kfree_skb(skb);
982
983 tp->stats.tx_dropped++;
984 }
985 tp->cur_tx = tp->dirty_tx = 0;
986}
987
/*
 * Quiesce the device for close: kill the PHY timer, stop the queue and
 * pending phy work, then repeatedly halt the chip and synchronize with
 * the irq/poll paths until IntrMask reads back as zero.
 */
static void sis190_down(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int poll_locked = 0;

	sis190_delete_timer(dev);

	netif_stop_queue(dev);

	flush_scheduled_work();

	do {
		spin_lock_irq(&tp->lock);

		sis190_asic_down(ioaddr);

		spin_unlock_irq(&tp->lock);

		synchronize_irq(dev->irq);

		/* Disable NAPI polling once, on the first iteration only. */
		if (!poll_locked) {
			netif_poll_disable(dev);
			poll_locked++;
		}

		synchronize_sched();

	} while (SIS_R32(IntrMask));

	sis190_tx_clear(tp);
	sis190_rx_clear(tp);
}
1021
1022static int sis190_close(struct net_device *dev)
1023{
1024 struct sis190_private *tp = netdev_priv(dev);
1025 struct pci_dev *pdev = tp->pci_dev;
1026
1027 sis190_down(dev);
1028
1029 free_irq(dev->irq, dev);
1030
1031 netif_poll_enable(dev);
1032
1033 pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1034 pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
1035
1036 tp->TxDescRing = NULL;
1037 tp->RxDescRing = NULL;
1038
1039 return 0;
1040}
1041
/*
 * hard_start_xmit handler: pad short frames to ETH_ZLEN, DMA-map the
 * data, fill the next Tx descriptor and kick the Tx engine.  Stops the
 * queue when the ring fills; sis190_tx_interrupt() wakes it again.
 */
static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 len, entry, dirty_tx;
	struct TxDesc *desc;
	dma_addr_t mapping;

	if (unlikely(skb->len < ETH_ZLEN)) {
		skb = skb_padto(skb, ETH_ZLEN);
		if (!skb) {
			/* skb_padto() already freed the original skb. */
			tp->stats.tx_dropped++;
			goto out;
		}
		len = ETH_ZLEN;
	} else {
		len = skb->len;
	}

	entry = tp->cur_tx % NUM_TX_DESC;
	desc = tp->TxDescRing + entry;

	/* Should not happen: the queue is stopped before the ring fills. */
	if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
		netif_stop_queue(dev);
		net_tx_err(tp, KERN_ERR PFX
			   "%s: BUG! Tx Ring full when queue awake!\n",
			   dev->name);
		return NETDEV_TX_BUSY;
	}

	mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	tp->Tx_skbuff[entry] = skb;

	desc->PSize = cpu_to_le32(len);
	desc->addr = cpu_to_le32(mapping);

	desc->size = cpu_to_le32(len);
	if (entry == (NUM_TX_DESC - 1))
		desc->size |= cpu_to_le32(RingEnd);

	/* Descriptor fully written before ownership is handed over. */
	wmb();

	desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);

	tp->cur_tx++;

	smp_wmb();

	SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);

	dev->trans_start = jiffies;

	dirty_tx = tp->dirty_tx;
	if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
		/* Ring just filled: stop, then re-check against a racing
		 * completion that may have freed slots meanwhile. */
		netif_stop_queue(dev);
		smp_rmb();
		if (dirty_tx != tp->dirty_tx)
			netif_wake_queue(dev);
	}
out:
	return NETDEV_TX_OK;
}
1105
1106static struct net_device_stats *sis190_get_stats(struct net_device *dev)
1107{
1108 struct sis190_private *tp = netdev_priv(dev);
1109
1110 return &tp->stats;
1111}
1112
1113static void sis190_release_board(struct pci_dev *pdev)
1114{
1115 struct net_device *dev = pci_get_drvdata(pdev);
1116 struct sis190_private *tp = netdev_priv(dev);
1117
1118 iounmap(tp->mmio_addr);
1119 pci_release_regions(pdev);
1120 pci_disable_device(pdev);
1121 free_netdev(dev);
1122}
1123
/*
 * Probe-time board setup: allocate the net_device, enable the PCI
 * device, validate and map BAR 0, wire up the MII glue and leave the
 * chip quiesced.  Returns the device or an ERR_PTR() on failure.
 */
static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
{
	struct sis190_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int rc;

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
		rc = -ENOMEM;
		goto err_out_0;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);

	rc = pci_enable_device(pdev);
	if (rc < 0) {
		net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
		goto err_free_dev_1;
	}

	rc = -ENODEV;

	/* The register window must be memory-mapped and large enough. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}
	if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
		net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc < 0) {
		net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}

	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc < 0) {
		net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
			  pci_name(pdev));
		goto err_free_res_3;
	}

	pci_set_master(pdev);

	ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
	if (!ioaddr) {
		net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
			  pci_name(pdev));
		rc = -EIO;
		goto err_free_res_3;
	}

	tp->pci_dev = pdev;
	tp->mmio_addr = ioaddr;

	/* Generic MII glue; the PHY id is not known at this point. */
	tp->mii_if.dev = dev;
	tp->mii_if.mdio_read = __mdio_read;
	tp->mii_if.mdio_write = __mdio_write;
	// tp->mii_if.phy_id = XXX;
	tp->mii_if.phy_id_mask = 0x1f;
	tp->mii_if.reg_num_mask = 0x1f;

	sis190_irq_mask_and_ack(ioaddr);

	sis190_soft_reset(ioaddr);
out:
	return dev;

err_free_res_3:
	pci_release_regions(pdev);
err_pci_disable_2:
	pci_disable_device(pdev);
err_free_dev_1:
	free_netdev(dev);
err_out_0:
	dev = ERR_PTR(rc);
	goto out;
}
1213
/*
 * Watchdog handler: the transmitter has stalled.  Stop the Tx engine,
 * mask interrupts, drop every pending Tx descriptor under tp->lock,
 * then fully restart the hardware and wake the queue.  The sequence
 * is order-sensitive; do not reorder the steps.
 */
static void sis190_tx_timeout(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u8 tmp8;

	/* Disable Tx, if not already */
	tmp8 = SIS_R8(TxControl);
	if (tmp8 & CmdTxEnb)
		SIS_W8(TxControl, tmp8 & ~CmdTxEnb);

	net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n",
		   dev->name, SIS_R32(TxControl), SIS_R32(TxSts));

	/* Disable interrupts by clearing the interrupt mask. */
	SIS_W32(IntrMask, 0x0000);

	/* Stop a shared interrupt from scavenging while we are. */
	spin_lock_irq(&tp->lock);
	sis190_tx_clear(tp);
	spin_unlock_irq(&tp->lock);

	/* ...and finally, reset everything. */
	sis190_hw_start(dev);

	netif_wake_queue(dev);
}
1242
1243static void sis190_set_speed_auto(struct net_device *dev)
1244{
1245 struct sis190_private *tp = netdev_priv(dev);
1246 void __iomem *ioaddr = tp->mmio_addr;
1247 int val;
1248
1249 net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);
1250
1251 val = mdio_read(ioaddr, MII_ADVERTISE);
1252
1253 // Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
1254 // unchanged.
1255 mdio_write(ioaddr, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
1256 ADVERTISE_100FULL | ADVERTISE_10FULL |
1257 ADVERTISE_100HALF | ADVERTISE_10HALF);
1258
1259 // Enable 1000 Full Mode.
1260 mdio_write(ioaddr, MII_CTRL1000, ADVERTISE_1000FULL);
1261
1262 // Enable auto-negotiation and restart auto-negotiation.
1263 mdio_write(ioaddr, MII_BMCR,
1264 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
1265}
1266
43afb949
FR
1267static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1268{
1269 struct sis190_private *tp = netdev_priv(dev);
1270
1271 return mii_ethtool_gset(&tp->mii_if, cmd);
1272}
1273
1274static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1275{
1276 struct sis190_private *tp = netdev_priv(dev);
1277
1278 return mii_ethtool_sset(&tp->mii_if, cmd);
1279}
1280
890e8d0a
FR
1281static void sis190_get_drvinfo(struct net_device *dev,
1282 struct ethtool_drvinfo *info)
1283{
1284 struct sis190_private *tp = netdev_priv(dev);
1285
1286 strcpy(info->driver, DRV_NAME);
1287 strcpy(info->version, DRV_VERSION);
1288 strcpy(info->bus_info, pci_name(tp->pci_dev));
1289}
1290
/* ethtool: size of the register dump returned by sis190_get_regs(). */
static int sis190_get_regs_len(struct net_device *dev)
{
	return SIS190_REGS_SIZE;
}
1295
/*
 * ethtool -d: copy the chip's MMIO register window into @p.  The
 * requested length is clamped to SIS190_REGS_SIZE and the copy runs
 * with tp->lock held (irqs off) so it does not race paths that write
 * registers under the same lock.
 */
static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			    void *p)
{
	struct sis190_private *tp = netdev_priv(dev);
	unsigned long flags;

	if (regs->len > SIS190_REGS_SIZE)
		regs->len = SIS190_REGS_SIZE;

	spin_lock_irqsave(&tp->lock, flags);
	memcpy_fromio(p, tp->mmio_addr, regs->len);
	spin_unlock_irqrestore(&tp->lock, flags);
}
1309
43afb949
FR
1310static int sis190_nway_reset(struct net_device *dev)
1311{
1312 struct sis190_private *tp = netdev_priv(dev);
1313
1314 return mii_nway_restart(&tp->mii_if);
1315}
1316
890e8d0a
FR
1317static u32 sis190_get_msglevel(struct net_device *dev)
1318{
1319 struct sis190_private *tp = netdev_priv(dev);
1320
1321 return tp->msg_enable;
1322}
1323
1324static void sis190_set_msglevel(struct net_device *dev, u32 value)
1325{
1326 struct sis190_private *tp = netdev_priv(dev);
1327
1328 tp->msg_enable = value;
1329}
1330
/*
 * ethtool entry points.  Link state is reported by the generic
 * ethtool_op_get_link() helper; the rest are implemented above.
 */
static struct ethtool_ops sis190_ethtool_ops = {
	.get_settings = sis190_get_settings,
	.set_settings = sis190_set_settings,
	.get_drvinfo = sis190_get_drvinfo,
	.get_regs_len = sis190_get_regs_len,
	.get_regs = sis190_get_regs,
	.get_link = ethtool_op_get_link,
	.get_msglevel = sis190_get_msglevel,
	.set_msglevel = sis190_set_msglevel,
	.nway_reset = sis190_nway_reset,
};
1342
43afb949
FR
1343static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1344{
1345 struct sis190_private *tp = netdev_priv(dev);
1346
1347 return !netif_running(dev) ? -EINVAL :
1348 generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
1349}
1350
/*
 * PCI probe entry point.  Brings the board up via sis190_init_board(),
 * reads the station address, fills in the net_device operations and
 * registers the interface.  On any failure after the board init the
 * board is released again and the errno is propagated.
 */
static int __devinit sis190_init_one(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct sis190_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int i, rc;

	/* Print the banner once, whatever the number of adapters. */
	if (!printed_version) {
		net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
		printed_version = 1;
	}

	dev = sis190_init_board(pdev);
	if (IS_ERR(dev)) {
		rc = PTR_ERR(dev);
		goto out;
	}

	tp = netdev_priv(dev);
	ioaddr = tp->mmio_addr;

	/* Get MAC address */
	/* Read node address from the EEPROM */

	/* NOTE(review): 0x4 presumably flags "EEPROM present" in
	 * ROMControl and 3 is the EEPROM word offset of the MAC -
	 * confirm against the SiS190 datasheet. */
	if (SIS_R32(ROMControl) & 0x4) {
		for (i = 0; i < 3; i++) {
			SIS_W16(RxMacAddr + 2*i,
				sis190_read_eeprom(ioaddr, 3 + i));
		}
	}

	/* The address the chip will actually use lives in RxMacAddr. */
	for (i = 0; i < MAC_ADDR_LEN; i++)
		dev->dev_addr[i] = SIS_R8(RxMacAddr + i);

	INIT_WORK(&tp->phy_task, sis190_phy_task, dev);

	dev->open = sis190_open;
	dev->stop = sis190_close;
	dev->do_ioctl = sis190_ioctl;
	dev->get_stats = sis190_get_stats;
	dev->tx_timeout = sis190_tx_timeout;
	dev->watchdog_timeo = SIS190_TX_TIMEOUT;
	dev->hard_start_xmit = sis190_start_xmit;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = sis190_netpoll;
#endif
	dev->set_multicast_list = sis190_set_rx_mode;
	SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
	dev->irq = pdev->irq;
	/* The device is pure MMIO; base_addr is a don't-care marker. */
	dev->base_addr = (unsigned long) 0xdead;

	spin_lock_init(&tp->lock);
	rc = register_netdev(dev);
	if (rc < 0) {
		sis190_release_board(pdev);
		goto out;
	}

	pci_set_drvdata(pdev, dev);

	net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), "
	       "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
	       pci_name(pdev), sis_chip_info[ent->driver_data].name,
	       ioaddr, dev->irq,
	       dev->dev_addr[0], dev->dev_addr[1],
	       dev->dev_addr[2], dev->dev_addr[3],
	       dev->dev_addr[4], dev->dev_addr[5]);

	/* No carrier until the PHY task reports a link. */
	netif_carrier_off(dev);

	sis190_set_speed_auto(dev);
out:
	return rc;
}
1427
/*
 * PCI remove entry point: unregister the interface first so no new
 * traffic can arrive, then release every board resource and clear
 * the driver data pointer.
 */
static void __devexit sis190_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	unregister_netdev(dev);
	sis190_release_board(pdev);
	pci_set_drvdata(pdev, NULL);
}
1436
/* PCI driver glue; sis190_pci_tbl lists the supported device IDs. */
static struct pci_driver sis190_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= sis190_pci_tbl,
	.probe		= sis190_init_one,
	.remove		= __devexit_p(sis190_remove_one),
};
1443
/* Module load: register the PCI driver (probes any present boards). */
static int __init sis190_init_module(void)
{
	return pci_module_init(&sis190_pci_driver);
}
1448
/* Module unload: unregister the PCI driver (removes all boards). */
static void __exit sis190_cleanup_module(void)
{
	pci_unregister_driver(&sis190_pci_driver);
}
1453
/* Module entry/exit hooks. */
module_init(sis190_init_module);
module_exit(sis190_cleanup_module);