/*
 * [PATCH] sis190: netconsole support.
 * drivers/net/sis190.c (history: commit 890e8d0a, FR)
 */
1/*
2 sis190.c: Silicon Integrated Systems SiS190 ethernet driver
3
4 Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
5 Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
6 Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
7
8 Based on r8169.c, tg3.c, 8139cp.c, skge.c and probably even epic100.c.
9
10 This software may be used and distributed according to the terms of
11 the GNU General Public License (GPL), incorporated herein by reference.
12 Drivers based on or derived from this code fall under the GPL and must
13 retain the authorship, copyright and license notice. This file is not
14 a complete program and may only be used when the entire operating
15 system is licensed under the GPL.
16
17 See the file COPYING in this distribution for more information.
18
19 */
20
21#include <linux/module.h>
22#include <linux/moduleparam.h>
23#include <linux/netdevice.h>
24#include <linux/etherdevice.h>
25#include <linux/ethtool.h>
26#include <linux/pci.h>
27#include <linux/mii.h>
28#include <linux/delay.h>
29#include <linux/crc32.h>
30#include <linux/dma-mapping.h>
31#include <asm/irq.h>
32
/*
 * Conditional logging helpers: emit via printk() only when the matching
 * netif_msg_*() bit is set in the device's msg_enable mask.
 *
 * Wrapped in do { } while (0) so each macro expands to a single
 * statement; the previous bare "if (...) printk(arg)" form could
 * capture an unrelated "else" branch when used unbraced.
 */
#define net_drv(p, arg...)	do { if (netif_msg_drv(p)) \
					printk(arg); } while (0)
#define net_probe(p, arg...)	do { if (netif_msg_probe(p)) \
					printk(arg); } while (0)
#define net_link(p, arg...)	do { if (netif_msg_link(p)) \
					printk(arg); } while (0)
#define net_intr(p, arg...)	do { if (netif_msg_intr(p)) \
					printk(arg); } while (0)
#define net_tx_err(p, arg...)	do { if (netif_msg_tx_err(p)) \
					printk(arg); } while (0)
43
/* NAPI support is compile-time optional; tag the version string. */
#ifdef CONFIG_SIS190_NAPI
#define NAPI_SUFFIX	"-NAPI"
#else
#define NAPI_SUFFIX	""
#endif

#define DRV_VERSION		"1.2" NAPI_SUFFIX
#define DRV_NAME		"sis190"
#define SIS190_DRIVER_NAME	DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
#define PFX DRV_NAME ": "

/* Rx delivery primitive and quota handling differ between NAPI and
 * plain-interrupt builds. */
#ifdef CONFIG_SIS190_NAPI
#define sis190_rx_skb			netif_receive_skb
#define sis190_rx_quota(count, quota)	min(count, quota)
#else
#define sis190_rx_skb			netif_rx
#define sis190_rx_quota(count, quota)	count
#endif

#define MAC_ADDR_LEN		6

/* Ring geometry: both rings have a fixed number of 16-byte descriptors. */
#define NUM_TX_DESC		64
#define NUM_RX_DESC		64
#define TX_RING_BYTES		(NUM_TX_DESC * sizeof(struct TxDesc))
#define RX_RING_BYTES		(NUM_RX_DESC * sizeof(struct RxDesc))
#define RX_BUF_SIZE		1536	/* default Rx buffer (standard MTU) */

#define SIS190_REGS_SIZE	0x80
#define SIS190_TX_TIMEOUT	(6*HZ)
#define SIS190_PHY_TIMEOUT	(10*HZ)
#define SIS190_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
				 NETIF_MSG_IFDOWN)

/* Enhanced PHY access register bit definitions */
#define EhnMIIread	0x0000
#define EhnMIIwrite	0x0020
#define EhnMIIdataShift	16
#define EhnMIIpmdShift	6	/* 7016 only */
#define EhnMIIregShift	11
#define EhnMIIreq	0x0010
#define EhnMIInotDone	0x0010

/* Write/read MMIO register. NOTE: these expect a local 'ioaddr'
 * (the mapped register base) to be in scope at the call site. */
#define SIS_W8(reg, val)	writeb ((val), ioaddr + (reg))
#define SIS_W16(reg, val)	writew ((val), ioaddr + (reg))
#define SIS_W32(reg, val)	writel ((val), ioaddr + (reg))
#define SIS_R8(reg)		readb (ioaddr + (reg))
#define SIS_R16(reg)		readw (ioaddr + (reg))
#define SIS_R32(reg)		readl (ioaddr + (reg))

/* Dummy read to flush posted PCI writes to the device. */
#define SIS_PCI_COMMIT()	SIS_R32(IntrControl)
96
/* MMIO register offsets within the SIS190_REGS_SIZE-byte BAR 0 window. */
enum sis190_registers {
	TxControl		= 0x00,
	TxDescStartAddr		= 0x04,
	TxNextDescAddr		= 0x0c,	// unused
	RxControl		= 0x10,
	RxDescStartAddr		= 0x14,
	RxNextDescAddr		= 0x1c,	// unused
	IntrStatus		= 0x20,
	IntrMask		= 0x24,
	IntrControl		= 0x28,
	IntrTimer		= 0x2c,	// unused
	PMControl		= 0x30,	// unused
	ROMControl		= 0x38,
	ROMInterface		= 0x3c,
	StationControl		= 0x40,
	GMIIControl		= 0x44,
	TxMacControl		= 0x50,
	RxMacControl		= 0x60,
	RxMacAddr		= 0x62,
	RxHashTable		= 0x68,
	// Undocumented	= 0x6c,
	RxWakeOnLan		= 0x70,
	// Undocumented	= 0x74,
	RxMPSControl		= 0x78,	// unused
};
122
/* Bit definitions for the registers above, grouped by register. */
enum sis190_register_content {
	/* IntrStatus */
	SoftInt			= 0x40000000,	// unused
	Timeup			= 0x20000000,	// unused
	PauseFrame		= 0x00080000,	// unused
	MagicPacket		= 0x00040000,	// unused
	WakeupFrame		= 0x00020000,	// unused
	LinkChange		= 0x00010000,
	RxQEmpty		= 0x00000080,
	RxQInt			= 0x00000040,
	TxQ1Empty		= 0x00000020,	// unused
	TxQ1Int			= 0x00000010,
	TxQ0Empty		= 0x00000008,	// unused
	TxQ0Int			= 0x00000004,
	RxHalt			= 0x00000002,
	TxHalt			= 0x00000001,

	/* RxStatusDesc */
	RxRES			= 0x00200000,	// unused
	RxCRC			= 0x00080000,
	RxRUNT			= 0x00100000,	// unused
	RxRWT			= 0x00400000,	// unused

	/* {Rx/Tx}CmdBits */
	CmdReset		= 0x10,
	CmdRxEnb		= 0x08,	// unused
	CmdTxEnb		= 0x01,
	RxBufEmpty		= 0x01,	// unused

	/* Cfg9346Bits */
	Cfg9346_Lock		= 0x00,	// unused
	Cfg9346_Unlock		= 0xc0,	// unused

	/* RxMacControl */
	AcceptErr		= 0x20,	// unused
	AcceptRunt		= 0x10,	// unused
	AcceptBroadcast		= 0x0800,
	AcceptMulticast		= 0x0400,
	AcceptMyPhys		= 0x0200,
	AcceptAllPhys		= 0x0100,

	/* RxConfigBits */
	RxCfgFIFOShift		= 13,
	RxCfgDMAShift		= 8,	// 0x1a in RxControl ?

	/* TxConfigBits */
	TxInterFrameGapShift	= 24,
	TxDMAShift		= 8,	/* DMA burst value (0-7) is shift this many bits */

	/* StationControl */
	_1000bpsF		= 0x1c00,
	_1000bpsH		= 0x0c00,
	_100bpsF		= 0x1800,
	_100bpsH		= 0x0800,
	_10bpsF			= 0x1400,
	_10bpsH			= 0x0400,

	LinkStatus		= 0x02,	// unused
	FullDup			= 0x01,	// unused

	/* TBICSRBit */
	TBILinkOK		= 0x02000000,	// unused
};
186
/*
 * Hardware descriptors. All fields are little-endian as seen by the
 * chip; the driver converts with cpu_to_le32()/le32_to_cpu().
 */
struct TxDesc {
	u32 PSize;	/* packet size */
	u32 status;	/* ownership / control bits (see _DescStatusBit) */
	u32 addr;	/* DMA address of the buffer */
	u32 size;	/* buffer size; RingEnd flag in bit 31 */
};

struct RxDesc {
	u32 PSize;	/* received packet size + status bits */
	u32 status;	/* ownership / control bits (see _DescStatusBit) */
	u32 addr;	/* DMA address of the buffer */
	u32 size;	/* buffer size; RingEnd flag in bit 31 */
};

enum _DescStatusBit {
	/* _Desc.status */
	OWNbit		= 0x80000000,	/* descriptor owned by the NIC */
	INTbit		= 0x40000000,	/* raise interrupt on completion */
	DEFbit		= 0x00200000,
	CRCbit		= 0x00020000,	/* hardware appends CRC (Tx) */
	PADbit		= 0x00010000,	/* hardware pads short frames (Tx) */
	/* _Desc.size */
	RingEnd		= (1 << 31),	/* last descriptor of the ring */
	/* _Desc.PSize */
	RxSizeMask	= 0x0000ffff
};

/* Per-device private state, allocated together with the net_device. */
struct sis190_private {
	void __iomem *mmio_addr;	/* mapped BAR 0 registers */
	struct pci_dev *pci_dev;
	struct net_device_stats stats;
	spinlock_t lock;		/* serializes register access paths */
	u32 rx_buf_sz;			/* Rx buffer size (derived from MTU) */
	u32 cur_rx;			/* next Rx descriptor to process */
	u32 cur_tx;			/* next free Tx descriptor */
	u32 dirty_rx;			/* first Rx slot awaiting a buffer */
	u32 dirty_tx;			/* next Tx descriptor to reclaim */
	dma_addr_t rx_dma;		/* DMA handle of RxDescRing */
	dma_addr_t tx_dma;		/* DMA handle of TxDescRing */
	struct RxDesc *RxDescRing;
	struct TxDesc *TxDescRing;
	struct sk_buff *Rx_skbuff[NUM_RX_DESC];
	struct sk_buff *Tx_skbuff[NUM_TX_DESC];
	struct work_struct phy_task;	/* deferred PHY/link handling */
	struct timer_list timer;	/* periodic PHY poll */
	u32 msg_enable;			/* netif_msg_* bitmask */
};
234
235const static struct {
236 const char *name;
237 u8 version; /* depend on docs */
238 u32 RxConfigMask; /* clear the bits supported by this chip */
239} sis_chip_info[] = {
240 { DRV_NAME, 0x00, 0xff7e1880, },
241};
242
/* PCI IDs handled by this driver (SiS 190 only). */
static struct pci_device_id sis190_pci_tbl[] __devinitdata = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
	{ 0, },
};

MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);

/* Frames shorter than this are copied into a fresh skb on Rx. */
static int rx_copybreak = 200;

/* Module-level debug mask; -1 means "use SIS190_MSG_DEFAULT". */
static struct {
	u32 msg_enable;
} debug = { -1 };

MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver");
module_param(rx_copybreak, int, 0);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");

/* Interrupt sources the driver actually services. */
static const u32 sis190_intr_mask =
	RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt;

/*
 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
 * The chips use a 64 element hash table based on the Ethernet CRC.
 */
static int multicast_filter_limit = 32;
273
274static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
275{
276 unsigned int i;
277
278 SIS_W32(GMIIControl, ctl);
279
280 msleep(1);
281
282 for (i = 0; i < 100; i++) {
283 if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
284 break;
285 msleep(1);
286 }
287
288 if (i > 999)
289 printk(KERN_ERR PFX "PHY command failed !\n");
290}
291
292static void mdio_write(void __iomem *ioaddr, int reg, int val)
293{
294 u32 pmd = 1;
295
296 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
297 (((u32) reg) << EhnMIIregShift) | (pmd << EhnMIIpmdShift) |
298 (((u32) val) << EhnMIIdataShift));
299}
300
301static int mdio_read(void __iomem *ioaddr, int reg)
302{
303 u32 pmd = 1;
304
305 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
306 (((u32) reg) << EhnMIIregShift) | (pmd << EhnMIIpmdShift));
307
308 return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
309}
310
311static int sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
312{
313 unsigned int i;
314 u16 data;
315 u32 val;
316
317 if (!(SIS_R32(ROMControl) & 0x0002))
318 return 0;
319
320 val = (0x0080 | (0x2 << 8) | (reg << 10));
321
322 SIS_W32(ROMInterface, val);
323
324 for (i = 0; i < 200; i++) {
325 if (!(SIS_R32(ROMInterface) & 0x0080))
326 break;
327 msleep(1);
328 }
329
330 data = (u16) ((SIS_R32(ROMInterface) & 0xffff0000) >> 16);
331
332 return data;
333}
334
/* Mask all interrupt sources, ack any pending ones, and flush the
 * posted writes so the device is quiesced before we return. */
static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
{
	SIS_W32(IntrMask, 0x00);
	SIS_W32(IntrStatus, 0xffffffff);
	SIS_PCI_COMMIT();
}
341
/* Halt the NIC: stop DMA, then mask and ack all interrupts. */
static void sis190_asic_down(void __iomem *ioaddr)
{
	/* Stop the chip's Tx and Rx DMA processes. */

	SIS_W32(TxControl, 0x1a00);
	SIS_W32(RxControl, 0x1a00);

	sis190_irq_mask_and_ack(ioaddr);
}
351
/* Flag @desc as the final descriptor so the chip wraps to the start. */
static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->size |= cpu_to_le32(RingEnd);
}
356
/*
 * Return an Rx descriptor to the NIC, preserving its RingEnd flag.
 * The wmb() ensures size/PSize are visible before ownership transfers
 * via the OWNbit write.
 */
static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->size) & RingEnd;

	desc->PSize = 0x0;
	desc->size = cpu_to_le32(rx_buf_sz | eor);
	wmb();
	desc->status = cpu_to_le32(OWNbit | INTbit);
}
366
/* Install a freshly mapped buffer into @desc and hand it to the NIC. */
static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				      u32 rx_buf_sz)
{
	desc->addr = cpu_to_le32(mapping);
	sis190_give_to_asic(desc, rx_buf_sz);
}
373
/*
 * Neutralize an Rx descriptor (no buffer attached) while keeping its
 * RingEnd flag. The poison address marks the slot as invalid; clearing
 * status after the wmb() revokes NIC ownership last.
 */
static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->PSize = 0x0;
	desc->addr = 0xdeadbeef;
	desc->size &= cpu_to_le32(RingEnd);
	wmb();
	desc->status = 0x0;
}
382
383static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
384 struct RxDesc *desc, u32 rx_buf_sz)
385{
386 struct sk_buff *skb;
387 dma_addr_t mapping;
388 int ret = 0;
389
390 skb = dev_alloc_skb(rx_buf_sz);
391 if (!skb)
392 goto err_out;
393
394 *sk_buff = skb;
395
396 mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
397 PCI_DMA_FROMDEVICE);
398
399 sis190_map_to_asic(desc, mapping, rx_buf_sz);
400out:
401 return ret;
402
403err_out:
404 ret = -ENOMEM;
405 sis190_make_unusable_by_asic(desc);
406 goto out;
407}
408
409static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
410 u32 start, u32 end)
411{
412 u32 cur;
413
414 for (cur = start; cur < end; cur++) {
415 int ret, i = cur % NUM_RX_DESC;
416
417 if (tp->Rx_skbuff[i])
418 continue;
419
420 ret = sis190_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
421 tp->RxDescRing + i, tp->rx_buf_sz);
422 if (ret < 0)
423 break;
424 }
425 return cur - start;
426}
427
428static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
429 struct RxDesc *desc, int rx_buf_sz)
430{
431 int ret = -1;
432
433 if (pkt_size < rx_copybreak) {
434 struct sk_buff *skb;
435
436 skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
437 if (skb) {
438 skb_reserve(skb, NET_IP_ALIGN);
439 eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
440 *sk_buff = skb;
441 sis190_give_to_asic(desc, rx_buf_sz);
442 ret = 0;
443 }
444 }
445 return ret;
446}
447
448static int sis190_rx_interrupt(struct net_device *dev,
449 struct sis190_private *tp, void __iomem *ioaddr)
450{
451 struct net_device_stats *stats = &tp->stats;
452 u32 rx_left, cur_rx = tp->cur_rx;
453 u32 delta, count;
454
455 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
456 rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);
457
458 for (; rx_left > 0; rx_left--, cur_rx++) {
459 unsigned int entry = cur_rx % NUM_RX_DESC;
460 struct RxDesc *desc = tp->RxDescRing + entry;
461 u32 status;
462
463 if (desc->status & OWNbit)
464 break;
465
466 status = le32_to_cpu(desc->PSize);
467
468 // net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
469 // status);
470
471 if (status & RxCRC) {
472 net_intr(tp, KERN_INFO "%s: bad crc. status = %08x.\n",
473 dev->name, status);
474 stats->rx_errors++;
475 stats->rx_crc_errors++;
476 sis190_give_to_asic(desc, tp->rx_buf_sz);
477 } else if (!(status & PADbit)) {
478 net_intr(tp, KERN_INFO "%s: bad pad. status = %08x.\n",
479 dev->name, status);
480 stats->rx_errors++;
481 stats->rx_length_errors++;
482 sis190_give_to_asic(desc, tp->rx_buf_sz);
483 } else {
484 struct sk_buff *skb = tp->Rx_skbuff[entry];
485 int pkt_size = (status & RxSizeMask) - 4;
486 void (*pci_action)(struct pci_dev *, dma_addr_t,
487 size_t, int) = pci_dma_sync_single_for_device;
488
489 if (unlikely(pkt_size > tp->rx_buf_sz)) {
490 net_intr(tp, KERN_INFO
491 "%s: (frag) status = %08x.\n",
492 dev->name, status);
493 stats->rx_dropped++;
494 stats->rx_length_errors++;
495 sis190_give_to_asic(desc, tp->rx_buf_sz);
496 continue;
497 }
498
499 pci_dma_sync_single_for_cpu(tp->pci_dev,
500 le32_to_cpu(desc->addr), tp->rx_buf_sz,
501 PCI_DMA_FROMDEVICE);
502
503 if (sis190_try_rx_copy(&skb, pkt_size, desc,
504 tp->rx_buf_sz)) {
505 pci_action = pci_unmap_single;
506 tp->Rx_skbuff[entry] = NULL;
507 sis190_make_unusable_by_asic(desc);
508 }
509
510 pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
511 tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
512
513 skb->dev = dev;
514 skb_put(skb, pkt_size);
515 skb->protocol = eth_type_trans(skb, dev);
516
517 sis190_rx_skb(skb);
518
519 dev->last_rx = jiffies;
520 stats->rx_bytes += pkt_size;
521 stats->rx_packets++;
522 }
523 }
524 count = cur_rx - tp->cur_rx;
525 tp->cur_rx = cur_rx;
526
527 delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
528 if (!delta && count && netif_msg_intr(tp))
529 printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
530 tp->dirty_rx += delta;
531
532 if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
533 printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);
534
535 return count;
536}
537
538static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
539 struct TxDesc *desc)
540{
541 unsigned int len;
542
543 len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
544
545 pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
546
547 memset(desc, 0x00, sizeof(*desc));
548}
549
/*
 * Tx completion handler: reclaim descriptors the NIC has finished
 * with, free their skbs, and wake the queue if we had stopped it
 * because the ring was full.
 */
static void sis190_tx_interrupt(struct net_device *dev,
				struct sis190_private *tp, void __iomem *ioaddr)
{
	u32 pending, dirty_tx = tp->dirty_tx;
	/*
	 * Remember whether the queue was full *before* reclaiming, so we
	 * only wake it when we were the ones who caused the stop
	 * (hint: think preempt and unclocked smp systems).
	 */
	unsigned int queue_stopped;

	/* Pairs with the smp_wmb() in sis190_start_xmit(). */
	smp_rmb();
	pending = tp->cur_tx - dirty_tx;
	queue_stopped = (pending == NUM_TX_DESC);

	for (; pending; pending--, dirty_tx++) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct TxDesc *txd = tp->TxDescRing + entry;
		struct sk_buff *skb;

		/* Still owned by the NIC: stop reclaiming here. */
		if (le32_to_cpu(txd->status) & OWNbit)
			break;

		skb = tp->Tx_skbuff[entry];

		tp->stats.tx_packets++;
		tp->stats.tx_bytes += skb->len;

		sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
		tp->Tx_skbuff[entry] = NULL;
		dev_kfree_skb_irq(skb);
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		/* Publish dirty_tx before the queue wake-up. */
		smp_wmb();
		if (queue_stopped)
			netif_wake_queue(dev);
	}
}
589
/*
 * The interrupt handler does all of the Rx thread work and cleans up after
 * the Tx thread.
 */
static irqreturn_t sis190_interrupt(int irq, void *__dev, struct pt_regs *regs)
{
	struct net_device *dev = __dev;
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int handled = 0;
	u32 status;

	status = SIS_R32(IntrStatus);

	/* 0xffffffff: device gone/unresponsive; 0: not our interrupt. */
	if ((status == 0xffffffff) || !status)
		goto out;

	handled = 1;

	/* Interface went down under us: quiesce the chip and bail. */
	if (unlikely(!netif_running(dev))) {
		sis190_asic_down(ioaddr);
		goto out;
	}

	/* Ack everything we are about to service. */
	SIS_W32(IntrStatus, status);

	if (status & LinkChange) {
		net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
		/* Link handling needs sleeping MDIO I/O: defer to a work. */
		schedule_work(&tp->phy_task);
	}

	if (status & RxQInt)
		sis190_rx_interrupt(dev, tp, ioaddr);

	if (status & TxQ0Int)
		sis190_tx_interrupt(dev, tp, ioaddr);
out:
	return IRQ_RETVAL(handled);
}
631
/* (history: commit 4405d3b5, FR) */
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Netconsole/netpoll hook: run the interrupt handler with the IRQ line
 * disabled so it cannot race with the real interrupt.
 */
static void sis190_netpoll(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	disable_irq(pdev->irq);
	sis190_interrupt(pdev->irq, dev, NULL);
	enable_irq(pdev->irq);
}
#endif
643
/* (history: commit 890e8d0a, FR) */
644static void sis190_free_rx_skb(struct sis190_private *tp,
645 struct sk_buff **sk_buff, struct RxDesc *desc)
646{
647 struct pci_dev *pdev = tp->pci_dev;
648
649 pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
650 PCI_DMA_FROMDEVICE);
651 dev_kfree_skb(*sk_buff);
652 *sk_buff = NULL;
653 sis190_make_unusable_by_asic(desc);
654}
655
656static void sis190_rx_clear(struct sis190_private *tp)
657{
658 unsigned int i;
659
660 for (i = 0; i < NUM_RX_DESC; i++) {
661 if (!tp->Rx_skbuff[i])
662 continue;
663 sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
664 }
665}
666
667static void sis190_init_ring_indexes(struct sis190_private *tp)
668{
669 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
670}
671
672static int sis190_init_ring(struct net_device *dev)
673{
674 struct sis190_private *tp = netdev_priv(dev);
675
676 sis190_init_ring_indexes(tp);
677
678 memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
679 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
680
681 if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
682 goto err_rx_clear;
683
684 sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
685
686 return 0;
687
688err_rx_clear:
689 sis190_rx_clear(tp);
690 return -ENOMEM;
691}
692
/*
 * Program the Rx filter: promiscuous, all-multicast, or a 64-bit CRC
 * hash of the device's multicast list, then write the mode and hash
 * table to the chip under tp->lock.
 */
static void sis190_set_rx_mode(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;
	u32 mc_filter[2];	/* Multicast hash filter */
	u16 rx_mode;

	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		net_drv(tp, KERN_NOTICE "%s: Promiscuous mode enabled.\n",
			dev->name);
		rx_mode =
			AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
			AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((dev->mc_count > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct dev_mc_list *mclist;
		unsigned int i;

		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		/* Hash each address into one of 64 filter bits. */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			int bit_nr =
				ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	spin_lock_irqsave(&tp->lock, flags);

	/* Bit 0x2 appears to stay set in RxMacControl - kept as-is. */
	SIS_W16(RxMacControl, rx_mode | 0x2);
	SIS_W32(RxHashTable, mc_filter[0]);
	SIS_W32(RxHashTable + 4, mc_filter[1]);

	spin_unlock_irqrestore(&tp->lock, flags);
}
737
/* Soft-reset the chip, then halt DMA and mask interrupts. */
static void sis190_soft_reset(void __iomem *ioaddr)
{
	SIS_W32(IntrControl, 0x8000);	/* assert reset */
	SIS_PCI_COMMIT();		/* flush posted write */
	msleep(1);
	SIS_W32(IntrControl, 0x0);	/* deassert reset */
	sis190_asic_down(ioaddr);
	msleep(1);
}
747
/*
 * Bring the hardware up: reset, program the descriptor ring bases,
 * set default MAC/PHY control values, apply the Rx filter, unmask
 * interrupts, enable DMA, and start the Tx queue.
 */
static void sis190_hw_start(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	sis190_soft_reset(ioaddr);

	SIS_W32(TxDescStartAddr, tp->tx_dma);
	SIS_W32(RxDescStartAddr, tp->rx_dma);

	SIS_W32(IntrStatus, 0xffffffff);
	SIS_W32(IntrMask, 0x0);
	/*
	 * Default is 100Mbps.
	 * A bit strange: 100Mbps is 0x1801 elsewhere -- FR 2005/06/09
	 */
	SIS_W16(StationControl, 0x1901);
	SIS_W32(GMIIControl, 0x0);
	SIS_W32(TxMacControl, 0x60);
	SIS_W16(RxMacControl, 0x02);
	SIS_W32(RxHashTable, 0x0);
	SIS_W32(0x6c, 0x0);		/* undocumented register */
	SIS_W32(RxWakeOnLan, 0x0);
	SIS_W32(0x74, 0x0);		/* undocumented register */

	SIS_PCI_COMMIT();

	sis190_set_rx_mode(dev);

	/* Enable all known interrupts by setting the interrupt mask. */
	SIS_W32(IntrMask, sis190_intr_mask);

	SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
	SIS_W32(RxControl, 0x1a1d);

	netif_start_queue(dev);
}
785
/*
 * PHY maintenance, run from process context (tp->phy_task).
 * Waits out a PHY reset, retries autonegotiation until it completes,
 * then programs StationControl from the link partner abilities.
 */
static void sis190_phy_task(void * data)
{
	struct net_device *dev = data;
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u16 val;

	val = mdio_read(ioaddr, MII_BMCR);
	if (val & BMCR_RESET) {
		/* PHY still resetting: check again shortly. */
		// FIXME: needlessly high ? -- FR 02/07/2005
		mod_timer(&tp->timer, jiffies + HZ/10);
	} else if (!(mdio_read(ioaddr, MII_BMSR) & BMSR_ANEGCOMPLETE)) {
		/* No link yet: kick the PHY and retry later. */
		net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n",
			 dev->name);
		mdio_write(ioaddr, MII_BMCR, val | BMCR_RESET);
		mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
	} else {
		/* Rejoice ! Pick the best mode the partner advertised. */
		struct {
			int val;
			const char *msg;
			u16 ctl;
		} reg31[] = {
			/* NOTE(review): the 1000 Mbps rows reuse LPA_1000X*
			 * (fiber) bit names against the MII_LPA value read
			 * below - confirm against the chip/PHY datasheet. */
			{ LPA_1000XFULL | LPA_SLCT,
			  "1000 Mbps Full Duplex",
			  0x01 | _1000bpsF },
			{ LPA_1000XHALF | LPA_SLCT,
			  "1000 Mbps Half Duplex",
			  0x01 | _1000bpsH },
			{ LPA_100FULL,
			  "100 Mbps Full Duplex",
			  0x01 | _100bpsF },
			{ LPA_100HALF,
			  "100 Mbps Half Duplex",
			  0x01 | _100bpsH },
			{ LPA_10FULL,
			  "10 Mbps Full Duplex",
			  0x01 | _10bpsF },
			{ LPA_10HALF,
			  "10 Mbps Half Duplex",
			  0x01 | _10bpsH },
			{ 0, "unknown", 0x0000 }
		}, *p;

		val = mdio_read(ioaddr, 0x1f);
		net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);

		val = mdio_read(ioaddr, MII_LPA);
		net_link(tp, KERN_INFO "%s: mii lpa = %04x.\n", dev->name, val);

		/* First matching row wins; last row is a catch-all. */
		for (p = reg31; p->ctl; p++) {
			if ((val & p->val) == p->val)
				break;
		}
		if (p->ctl)
			SIS_W16(StationControl, p->ctl);
		net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
			 p->msg);
		netif_carrier_on(dev);
	}
}
847
848static void sis190_phy_timer(unsigned long __opaque)
849{
850 struct net_device *dev = (struct net_device *)__opaque;
851 struct sis190_private *tp = netdev_priv(dev);
852
853 if (likely(netif_running(dev)))
854 schedule_work(&tp->phy_task);
855}
856
857static inline void sis190_delete_timer(struct net_device *dev)
858{
859 struct sis190_private *tp = netdev_priv(dev);
860
861 del_timer_sync(&tp->timer);
862}
863
864static inline void sis190_request_timer(struct net_device *dev)
865{
866 struct sis190_private *tp = netdev_priv(dev);
867 struct timer_list *timer = &tp->timer;
868
869 init_timer(timer);
870 timer->expires = jiffies + SIS190_PHY_TIMEOUT;
871 timer->data = (unsigned long)dev;
872 timer->function = sis190_phy_timer;
873 add_timer(timer);
874}
875
876static void sis190_set_rxbufsize(struct sis190_private *tp,
877 struct net_device *dev)
878{
879 unsigned int mtu = dev->mtu;
880
881 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
882}
883
/*
 * net_device open(): allocate the descriptor rings and Rx buffers,
 * start the PHY timer, grab the IRQ, and start the hardware.
 * Unwinds in reverse order via the goto chain on failure.
 */
static int sis190_open(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	int rc = -ENOMEM;

	sis190_set_rxbufsize(tp, dev);

	/*
	 * Rx and Tx descriptors need 256 bytes alignment.
	 * pci_alloc_consistent() guarantees a stronger alignment.
	 */
	tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
	if (!tp->TxDescRing)
		goto out;

	tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
	if (!tp->RxDescRing)
		goto err_free_tx_0;

	rc = sis190_init_ring(dev);
	if (rc < 0)
		goto err_free_rx_1;

	INIT_WORK(&tp->phy_task, sis190_phy_task, dev);

	sis190_request_timer(dev);

	rc = request_irq(dev->irq, sis190_interrupt, SA_SHIRQ, dev->name, dev);
	if (rc < 0)
		goto err_release_timer_2;

	sis190_hw_start(dev);
out:
	return rc;

err_release_timer_2:
	sis190_delete_timer(dev);
	sis190_rx_clear(tp);
err_free_rx_1:
	pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
		tp->rx_dma);
err_free_tx_0:
	pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
		tp->tx_dma);
	goto out;
}
931
932static void sis190_tx_clear(struct sis190_private *tp)
933{
934 unsigned int i;
935
936 for (i = 0; i < NUM_TX_DESC; i++) {
937 struct sk_buff *skb = tp->Tx_skbuff[i];
938
939 if (!skb)
940 continue;
941
942 sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
943 tp->Tx_skbuff[i] = NULL;
944 dev_kfree_skb(skb);
945
946 tp->stats.tx_dropped++;
947 }
948 tp->cur_tx = tp->dirty_tx = 0;
949}
950
/*
 * Quiesce the device: stop the timer and queue, flush deferred PHY
 * work, then repeatedly halt the chip and synchronize against the IRQ
 * handler and pollers until the interrupt mask stays clear.
 */
static void sis190_down(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int poll_locked = 0;

	sis190_delete_timer(dev);

	netif_stop_queue(dev);

	flush_scheduled_work();

	do {
		spin_lock_irq(&tp->lock);

		sis190_asic_down(ioaddr);

		spin_unlock_irq(&tp->lock);

		/* Wait out any handler running on another CPU. */
		synchronize_irq(dev->irq);

		if (!poll_locked) {
			netif_poll_disable(dev);
			poll_locked++;
		}

		synchronize_sched();

	/* Loop in case something re-enabled interrupts meanwhile. */
	} while (SIS_R32(IntrMask));

	sis190_tx_clear(tp);
	sis190_rx_clear(tp);
}
984
/*
 * net_device stop(): shut the hardware down, release the IRQ, and free
 * the descriptor rings. Always returns 0.
 */
static int sis190_close(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	sis190_down(dev);

	free_irq(dev->irq, dev);

	/* Balance the netif_poll_disable() done in sis190_down(). */
	netif_poll_enable(dev);

	pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
	pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);

	tp->TxDescRing = NULL;
	tp->RxDescRing = NULL;

	return 0;
}
1004
/*
 * hard_start_xmit(): map the skb, fill the next Tx descriptor, hand it
 * to the NIC, and kick the Tx engine. Stops the queue when the ring
 * fills, re-checking dirty_tx to avoid a lost wake-up race with the
 * Tx completion handler.
 */
static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 len, entry, dirty_tx;
	struct TxDesc *desc;
	dma_addr_t mapping;

	/* Hardware needs at least ETH_ZLEN bytes; pad short frames. */
	if (unlikely(skb->len < ETH_ZLEN)) {
		skb = skb_padto(skb, ETH_ZLEN);
		if (!skb) {
			/* skb_padto() freed the skb on failure. */
			tp->stats.tx_dropped++;
			goto out;
		}
		len = ETH_ZLEN;
	} else {
		len = skb->len;
	}

	entry = tp->cur_tx % NUM_TX_DESC;
	desc = tp->TxDescRing + entry;

	/* Should not happen: queue must be stopped before the ring fills. */
	if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
		netif_stop_queue(dev);
		net_tx_err(tp, KERN_ERR PFX
			   "%s: BUG! Tx Ring full when queue awake!\n",
			   dev->name);
		return NETDEV_TX_BUSY;
	}

	mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	tp->Tx_skbuff[entry] = skb;

	desc->PSize = cpu_to_le32(len);
	desc->addr = cpu_to_le32(mapping);

	desc->size = cpu_to_le32(len);
	if (entry == (NUM_TX_DESC - 1))
		desc->size |= cpu_to_le32(RingEnd);

	/* Descriptor contents must be visible before ownership flips. */
	wmb();

	desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);

	tp->cur_tx++;

	/* Publish cur_tx before kicking the chip / checking fullness. */
	smp_wmb();

	/* NOTE(review): 0x10 here is presumably the Tx poll trigger,
	 * despite sharing its value with CmdReset - confirm vs datasheet. */
	SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);

	dev->trans_start = jiffies;

	dirty_tx = tp->dirty_tx;
	if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
		netif_stop_queue(dev);
		smp_rmb();
		/* Completion ran meanwhile: undo the stop. */
		if (dirty_tx != tp->dirty_tx)
			netif_wake_queue(dev);
	}
out:
	return NETDEV_TX_OK;
}
1068
1069static struct net_device_stats *sis190_get_stats(struct net_device *dev)
1070{
1071 struct sis190_private *tp = netdev_priv(dev);
1072
1073 return &tp->stats;
1074}
1075
/*
 * Undo sis190_init_board(): unmap registers, release the PCI regions,
 * disable the device, and free the net_device.
 */
static void sis190_release_board(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct sis190_private *tp = netdev_priv(dev);

	iounmap(tp->mmio_addr);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}
1086
/*
 * Allocate the net_device and bring the PCI function up far enough to
 * use it: enable the device, validate and claim BAR 0, set the DMA
 * mask, map the registers, and soft-reset the chip.
 * Returns the net_device on success or an ERR_PTR() on failure; the
 * goto chain unwinds in reverse allocation order.
 */
static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
{
	struct sis190_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int rc;

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
		rc = -ENOMEM;
		goto err_out_0;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);

	rc = pci_enable_device(pdev);
	if (rc < 0) {
		net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
		goto err_free_dev_1;
	}

	rc = -ENODEV;

	/* BAR 0 must be a memory region large enough for our registers. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}
	if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
		net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc < 0) {
		net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}

	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc < 0) {
		net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
			  pci_name(pdev));
		goto err_free_res_3;
	}

	pci_set_master(pdev);

	ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
	if (!ioaddr) {
		net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
			  pci_name(pdev));
		rc = -EIO;
		goto err_free_res_3;
	}

	tp->pci_dev = pdev;
	tp->mmio_addr = ioaddr;

	sis190_irq_mask_and_ack(ioaddr);

	sis190_soft_reset(ioaddr);
out:
	return dev;

err_free_res_3:
	pci_release_regions(pdev);
err_pci_disable_2:
	pci_disable_device(pdev);
err_free_dev_1:
	free_netdev(dev);
err_out_0:
	dev = ERR_PTR(rc);
	goto out;
}
1169
/*
 * Tx watchdog: the queue stalled. Stop the transmitter, mask
 * interrupts, drop everything pending, then restart the hardware.
 */
static void sis190_tx_timeout(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u8 tmp8;

	/* Disable Tx, if not already */
	tmp8 = SIS_R8(TxControl);
	if (tmp8 & CmdTxEnb)
		SIS_W8(TxControl, tmp8 & ~CmdTxEnb);

	/* Disable interrupts by clearing the interrupt mask. */
	SIS_W32(IntrMask, 0x0000);

	/* Stop a shared interrupt from scavenging while we are. */
	spin_lock_irq(&tp->lock);
	sis190_tx_clear(tp);
	spin_unlock_irq(&tp->lock);

	/* ...and finally, reset everything. */
	sis190_hw_start(dev);

	netif_wake_queue(dev);
}
1194
/*
 * Advertise 10/100 full+half and 1000 full, then enable and restart
 * autonegotiation on the PHY.
 */
static void sis190_set_speed_auto(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	int val;

	net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);

	val = mdio_read(ioaddr, MII_ADVERTISE);

	// Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
	// unchanged.
	mdio_write(ioaddr, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
		   ADVERTISE_100FULL | ADVERTISE_10FULL |
		   ADVERTISE_100HALF | ADVERTISE_10HALF);

	// Enable 1000 Full Mode.
	mdio_write(ioaddr, MII_CTRL1000, ADVERTISE_1000FULL);

	// Enable auto-negotiation and restart auto-negotiation.
	mdio_write(ioaddr, MII_BMCR,
		   BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
}
1218
1219static void sis190_get_drvinfo(struct net_device *dev,
1220 struct ethtool_drvinfo *info)
1221{
1222 struct sis190_private *tp = netdev_priv(dev);
1223
1224 strcpy(info->driver, DRV_NAME);
1225 strcpy(info->version, DRV_VERSION);
1226 strcpy(info->bus_info, pci_name(tp->pci_dev));
1227}
1228
/* ethtool hook: size in bytes of the register dump sis190_get_regs()
 * produces (the whole MMIO window). */
static int sis190_get_regs_len(struct net_device *dev)
{
	return SIS190_REGS_SIZE;
}
1233
1234static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1235 void *p)
1236{
1237 struct sis190_private *tp = netdev_priv(dev);
1238 unsigned long flags;
1239
1240 if (regs->len > SIS190_REGS_SIZE)
1241 regs->len = SIS190_REGS_SIZE;
1242
1243 spin_lock_irqsave(&tp->lock, flags);
1244 memcpy_fromio(p, tp->mmio_addr, regs->len);
1245 spin_unlock_irqrestore(&tp->lock, flags);
1246}
1247
1248static u32 sis190_get_msglevel(struct net_device *dev)
1249{
1250 struct sis190_private *tp = netdev_priv(dev);
1251
1252 return tp->msg_enable;
1253}
1254
1255static void sis190_set_msglevel(struct net_device *dev, u32 value)
1256{
1257 struct sis190_private *tp = netdev_priv(dev);
1258
1259 tp->msg_enable = value;
1260}
1261
/* ethtool operations supported by this driver; hooked up to the netdev
 * in sis190_init_one() via SET_ETHTOOL_OPS(). */
static struct ethtool_ops sis190_ethtool_ops = {
	.get_drvinfo	= sis190_get_drvinfo,
	.get_regs_len	= sis190_get_regs_len,
	.get_regs	= sis190_get_regs,
	.get_link	= ethtool_op_get_link,
	.get_msglevel	= sis190_get_msglevel,
	.set_msglevel	= sis190_set_msglevel,
};
1270
1271static int __devinit sis190_init_one(struct pci_dev *pdev,
1272 const struct pci_device_id *ent)
1273{
1274 static int printed_version = 0;
1275 struct sis190_private *tp;
1276 struct net_device *dev;
1277 void __iomem *ioaddr;
1278 int i, rc;
1279
1280 if (!printed_version) {
1281 net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
1282 printed_version = 1;
1283 }
1284
1285 dev = sis190_init_board(pdev);
1286 if (IS_ERR(dev)) {
1287 rc = PTR_ERR(dev);
1288 goto out;
1289 }
1290
1291 tp = netdev_priv(dev);
1292 ioaddr = tp->mmio_addr;
1293
1294 /* Get MAC address */
1295 /* Read node address from the EEPROM */
1296
1297 if (SIS_R32(ROMControl) & 0x4) {
1298 for (i = 0; i < 3; i++) {
1299 SIS_W16(RxMacAddr + 2*i,
1300 sis190_read_eeprom(ioaddr, 3 + i));
1301 }
1302 }
1303
1304 for (i = 0; i < MAC_ADDR_LEN; i++)
1305 dev->dev_addr[i] = SIS_R8(RxMacAddr + i);
1306
1307 INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
1308
1309 dev->open = sis190_open;
1310 dev->stop = sis190_close;
1311 dev->get_stats = sis190_get_stats;
1312 dev->tx_timeout = sis190_tx_timeout;
1313 dev->watchdog_timeo = SIS190_TX_TIMEOUT;
1314 dev->hard_start_xmit = sis190_start_xmit;
4405d3b5
FR
1315#ifdef CONFIG_NET_POLL_CONTROLLER
1316 dev->poll_controller = sis190_netpoll;
1317#endif
890e8d0a
FR
1318 dev->set_multicast_list = sis190_set_rx_mode;
1319 SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
1320 dev->irq = pdev->irq;
1321 dev->base_addr = (unsigned long) 0xdead;
1322
1323 spin_lock_init(&tp->lock);
1324 rc = register_netdev(dev);
1325 if (rc < 0) {
1326 sis190_release_board(pdev);
1327 goto out;
1328 }
1329
1330 pci_set_drvdata(pdev, dev);
1331
1332 net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), "
1333 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
1334 pci_name(pdev), sis_chip_info[ent->driver_data].name,
1335 ioaddr, dev->irq,
1336 dev->dev_addr[0], dev->dev_addr[1],
1337 dev->dev_addr[2], dev->dev_addr[3],
1338 dev->dev_addr[4], dev->dev_addr[5]);
1339
1340 netif_carrier_off(dev);
1341
1342 sis190_set_speed_auto(dev);
1343out:
1344 return rc;
1345}
1346
1347static void __devexit sis190_remove_one(struct pci_dev *pdev)
1348{
1349 struct net_device *dev = pci_get_drvdata(pdev);
1350
1351 unregister_netdev(dev);
1352 sis190_release_board(pdev);
1353 pci_set_drvdata(pdev, NULL);
1354}
1355
/* PCI driver descriptor: matches the IDs in sis190_pci_tbl and routes
 * probe/remove to the handlers above. */
static struct pci_driver sis190_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= sis190_pci_tbl,
	.probe		= sis190_init_one,
	.remove		= __devexit_p(sis190_remove_one),
};
1362
1363static int __init sis190_init_module(void)
1364{
1365 return pci_module_init(&sis190_pci_driver);
1366}
1367
/* Module exit point: unregister the PCI driver; the core then invokes
 * sis190_remove_one() for every bound device. */
static void __exit sis190_cleanup_module(void)
{
	pci_unregister_driver(&sis190_pci_driver);
}
1372
/* Wire the entry/exit points into the module loader. */
module_init(sis190_init_module);
module_exit(sis190_cleanup_module);