[NET]: Make NAPI polling independent of struct net_device objects.
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / net / sis190.c
1 /*
2 sis190.c: Silicon Integrated Systems SiS190 ethernet driver
3
4 Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
5 Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
6 Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
7
8 Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
9 genuine driver.
10
11 This software may be used and distributed according to the terms of
12 the GNU General Public License (GPL), incorporated herein by reference.
13 Drivers based on or derived from this code fall under the GPL and must
14 retain the authorship, copyright and license notice. This file is not
15 a complete program and may only be used when the entire operating
16 system is licensed under the GPL.
17
18 See the file COPYING in this distribution for more information.
19
20 */
21
22 #include <linux/module.h>
23 #include <linux/moduleparam.h>
24 #include <linux/netdevice.h>
25 #include <linux/rtnetlink.h>
26 #include <linux/etherdevice.h>
27 #include <linux/ethtool.h>
28 #include <linux/pci.h>
29 #include <linux/mii.h>
30 #include <linux/delay.h>
31 #include <linux/crc32.h>
32 #include <linux/dma-mapping.h>
33 #include <asm/irq.h>
34
/* Conditional logging helpers gated on the netif_msg_* bit corresponding
 * to each message class in tp->msg_enable. */
#define net_drv(p, arg...)	if (netif_msg_drv(p)) \
					printk(arg)
#define net_probe(p, arg...)	if (netif_msg_probe(p)) \
					printk(arg)
#define net_link(p, arg...)	if (netif_msg_link(p)) \
					printk(arg)
#define net_intr(p, arg...)	if (netif_msg_intr(p)) \
					printk(arg)
#define net_tx_err(p, arg...)	if (netif_msg_tx_err(p)) \
					printk(arg)

/* MII bus limits and wildcard values used during PHY probing. */
#define PHY_MAX_ADDR		32
#define PHY_ID_ANY		0x1f
#define MII_REG_ANY		0x1f

#define DRV_VERSION		"1.2"
#define DRV_NAME		"sis190"
#define SIS190_DRIVER_NAME	DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
#define PFX DRV_NAME ": "

/* Non-NAPI receive path: deliver directly via netif_rx and ignore quota. */
#define sis190_rx_skb			netif_rx
#define sis190_rx_quota(count, quota)	count

#define MAC_ADDR_LEN		6

/* Ring geometry. Descriptor counts must divide the hardware limits noted. */
#define NUM_TX_DESC		64	/* [8..1024] */
#define NUM_RX_DESC		64	/* [8..8192] */
#define TX_RING_BYTES		(NUM_TX_DESC * sizeof(struct TxDesc))
#define RX_RING_BYTES		(NUM_RX_DESC * sizeof(struct RxDesc))
#define RX_BUF_SIZE		1536
#define RX_BUF_MASK		0xfff8	/* RxDesc.size keeps only these bits */

#define SIS190_REGS_SIZE	0x80
#define SIS190_TX_TIMEOUT	(6*HZ)
#define SIS190_PHY_TIMEOUT	(10*HZ)
#define SIS190_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
				 NETIF_MSG_IFDOWN)

/* Enhanced PHY access register bit definitions */
#define EhnMIIread		0x0000
#define EhnMIIwrite		0x0020
#define EhnMIIdataShift		16
#define EhnMIIpmdShift		6	/* 7016 only */
#define EhnMIIregShift		11
#define EhnMIIreq		0x0010
#define EhnMIInotDone		0x0010

/* Write/read MMIO register. All rely on a local 'ioaddr' being in scope. */
#define SIS_W8(reg, val)	writeb ((val), ioaddr + (reg))
#define SIS_W16(reg, val)	writew ((val), ioaddr + (reg))
#define SIS_W32(reg, val)	writel ((val), ioaddr + (reg))
#define SIS_R8(reg)		readb (ioaddr + (reg))
#define SIS_R16(reg)		readw (ioaddr + (reg))
#define SIS_R32(reg)		readl (ioaddr + (reg))

/* Posted-write flush: a dummy read forces preceding PCI writes to complete. */
#define SIS_PCI_COMMIT()	SIS_R32(IntrControl)
92
/* MMIO register map (byte offsets into the BAR). */
enum sis190_registers {
	TxControl		= 0x00,
	TxDescStartAddr		= 0x04,
	rsv0			= 0x08,	// reserved
	TxSts			= 0x0c,	// unused (Control/Status)
	RxControl		= 0x10,
	RxDescStartAddr		= 0x14,
	rsv1			= 0x18,	// reserved
	RxSts			= 0x1c,	// unused
	IntrStatus		= 0x20,
	IntrMask		= 0x24,
	IntrControl		= 0x28,
	IntrTimer		= 0x2c,	// unused (Interupt Timer)
	PMControl		= 0x30,	// unused (Power Mgmt Control/Status)
	rsv2			= 0x34,	// reserved
	ROMControl		= 0x38,
	ROMInterface		= 0x3c,
	StationControl		= 0x40,
	GMIIControl		= 0x44,
	GIoCR			= 0x48, // unused (GMAC IO Compensation)
	GIoCtrl			= 0x4c, // unused (GMAC IO Control)
	TxMacControl		= 0x50,
	TxLimit			= 0x54, // unused (Tx MAC Timer/TryLimit)
	RGDelay			= 0x58, // unused (RGMII Tx Internal Delay)
	rsv3			= 0x5c, // reserved
	RxMacControl		= 0x60,
	RxMacAddr		= 0x62,
	RxHashTable		= 0x68,
	// Undocumented		= 0x6c,
	RxWolCtrl		= 0x70,
	RxWolData		= 0x74, // unused (Rx WOL Data Access)
	RxMPSControl		= 0x78,	// unused (Rx MPS Control)
	rsv4			= 0x7c,	// reserved
};
127
/* Bit definitions for the registers above, grouped by register. */
enum sis190_register_content {
	/* IntrStatus */
	SoftInt			= 0x40000000,	// unused
	Timeup			= 0x20000000,	// unused
	PauseFrame		= 0x00080000,	// unused
	MagicPacket		= 0x00040000,	// unused
	WakeupFrame		= 0x00020000,	// unused
	LinkChange		= 0x00010000,
	RxQEmpty		= 0x00000080,
	RxQInt			= 0x00000040,
	TxQ1Empty		= 0x00000020,	// unused
	TxQ1Int			= 0x00000010,
	TxQ0Empty		= 0x00000008,	// unused
	TxQ0Int			= 0x00000004,
	RxHalt			= 0x00000002,
	TxHalt			= 0x00000001,

	/* {Rx/Tx}CmdBits */
	CmdReset		= 0x10,
	CmdRxEnb		= 0x08,		// unused
	CmdTxEnb		= 0x01,
	RxBufEmpty		= 0x01,		// unused

	/* Cfg9346Bits */
	Cfg9346_Lock		= 0x00,		// unused
	Cfg9346_Unlock		= 0xc0,		// unused

	/* RxMacControl */
	AcceptErr		= 0x20,		// unused
	AcceptRunt		= 0x10,		// unused
	AcceptBroadcast		= 0x0800,
	AcceptMulticast		= 0x0400,
	AcceptMyPhys		= 0x0200,
	AcceptAllPhys		= 0x0100,

	/* RxConfigBits */
	RxCfgFIFOShift		= 13,
	RxCfgDMAShift		= 8,		// 0x1a in RxControl ?

	/* TxConfigBits */
	TxInterFrameGapShift	= 24,
	TxDMAShift		= 8, /* DMA burst value (0-7) is shift this many bits */

	LinkStatus		= 0x02,		// unused
	FullDup			= 0x01,		// unused

	/* TBICSRBit */
	TBILinkOK		= 0x02000000,	// unused
};
177
/* Hardware Tx descriptor (16 bytes, little-endian, 256-byte ring alignment
 * required — see sis190_open). Field semantics per _DescStatusBit below. */
struct TxDesc {
	__le32 PSize;	/* packet size */
	__le32 status;	/* OWN/INT/command bits */
	__le32 addr;	/* DMA address of the buffer */
	__le32 size;	/* buffer size, RingEnd marks the last descriptor */
};
184
/* Hardware Rx descriptor — same layout as TxDesc but the status/PSize
 * bits have Rx-specific meanings (see _DescStatusBit). */
struct RxDesc {
	__le32 PSize;	/* received packet size + error/status flags */
	__le32 status;	/* OWN/INT bits */
	__le32 addr;	/* DMA address of the receive buffer */
	__le32 size;	/* buffer size, RingEnd marks the last descriptor */
};
191
/* Descriptor field bit definitions, shared between Tx and Rx where noted. */
enum _DescStatusBit {
	/* _Desc.status */
	OWNbit		= 0x80000000, // RXOWN/TXOWN
	INTbit		= 0x40000000, // RXINT/TXINT
	CRCbit		= 0x00020000, // CRCOFF/CRCEN
	PADbit		= 0x00010000, // PREADD/PADEN
	/* _Desc.size */
	RingEnd		= 0x80000000,
	/* TxDesc.status */
	LSEN		= 0x08000000, // TSO ? -- FR
	IPCS		= 0x04000000,
	TCPCS		= 0x02000000,
	UDPCS		= 0x01000000,
	BSTEN		= 0x00800000,
	EXTEN		= 0x00400000,
	DEFEN		= 0x00200000,
	BKFEN		= 0x00100000,
	CRSEN		= 0x00080000,
	COLEN		= 0x00040000,
	THOL3		= 0x30000000,
	THOL2		= 0x20000000,
	THOL1		= 0x10000000,
	THOL0		= 0x00000000,
	/* RxDesc.status */
	IPON		= 0x20000000,
	TCPON		= 0x10000000,
	UDPON		= 0x08000000,
	Wakup		= 0x00400000,
	Magic		= 0x00200000,
	Pause		= 0x00100000,
	DEFbit		= 0x00200000,
	BCAST		= 0x000c0000,
	MCAST		= 0x00080000,
	UCAST		= 0x00040000,
	/* RxDesc.PSize */
	TAGON		= 0x80000000,
	RxDescCountMask	= 0x7f000000, // multi-desc pkt when > 1 ? -- FR
	ABORT		= 0x00800000,
	SHORT		= 0x00400000,
	LIMIT		= 0x00200000,
	MIIER		= 0x00100000,
	OVRUN		= 0x00080000,
	NIBON		= 0x00040000,
	COLON		= 0x00020000,
	CRCOK		= 0x00010000,
	RxSizeMask	= 0x0000ffff
	/*
	 * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
	 * provide two (unused with Linux) Tx queues. No publically
	 * available documentation alas.
	 */
};
244
/* Bits of the ROMInterface register used for EEPROM access. */
enum sis190_eeprom_access_register_bits {
	EECS	= 0x00000001,	// unused
	EECLK	= 0x00000002,	// unused
	EEDO	= 0x00000008,	// unused
	EEDI	= 0x00000004,	// unused
	EEREQ	= 0x00000080,	/* request; cleared by hw when done */
	EEROP	= 0x00000200,	/* read operation */
	EEWOP	= 0x00000100	// unused
};
254
/* EEPROM Addresses (word offsets read via sis190_read_eeprom). */
enum sis190_eeprom_address {
	EEPROMSignature	= 0x00,
	EEPROMCLK	= 0x01,	// unused
	EEPROMInfo	= 0x02,
	EEPROMMACAddr	= 0x03
};
262
/* Per-board feature flags kept in sis190_private.features (bitmask). */
enum sis190_feature {
	F_HAS_RGMII	= 1,	/* MAC is wired to the PHY over RGMII */
	F_PHY_88E1111	= 2,	/* Marvell 88E1111 quirks apply */
	F_PHY_BCM5461	= 4	/* Broadcom BCM5461 quirks apply */
};
268
/* Per-adapter driver state, reached through netdev_priv(dev). */
struct sis190_private {
	void __iomem *mmio_addr;	/* mapped BAR for SIS_R/W accessors */
	struct pci_dev *pci_dev;
	struct net_device *dev;
	struct net_device_stats stats;
	spinlock_t lock;		/* guards hw register updates vs irq */
	u32 rx_buf_sz;			/* per-buffer Rx size, 8-byte aligned */
	/* Ring cursors: cur_* advance on production, dirty_* on reclaim.
	 * They are free-running; indices are taken modulo NUM_*_DESC. */
	u32 cur_rx;
	u32 cur_tx;
	u32 dirty_rx;
	u32 dirty_tx;
	dma_addr_t rx_dma;		/* bus address of RxDescRing */
	dma_addr_t tx_dma;		/* bus address of TxDescRing */
	struct RxDesc *RxDescRing;
	struct TxDesc *TxDescRing;
	struct sk_buff *Rx_skbuff[NUM_RX_DESC];	/* skb attached to each slot */
	struct sk_buff *Tx_skbuff[NUM_TX_DESC];
	struct work_struct phy_task;	/* deferred link (re)negotiation */
	struct timer_list timer;	/* periodic PHY poll, SIS190_PHY_TIMEOUT */
	u32 msg_enable;			/* netif_msg_* verbosity bitmask */
	struct mii_if_info mii_if;
	struct list_head first_phy;	/* detected PHYs (struct sis190_phy) */
	u32 features;			/* enum sis190_feature bitmask */
};
293
/* One PHY discovered during bus scan, linked on sis190_private.first_phy. */
struct sis190_phy {
	struct list_head list;
	int phy_id;	/* MII bus address */
	u16 id[2];	/* PHYSID1/PHYSID2, matched against mii_chip_table */
	u16 status;	/* BMSR snapshot at probe time */
	u8  type;	/* enum sis190_phy_type */
};
301
/* Medium classification used when selecting the default PHY. */
enum sis190_phy_type {
	UNKNOWN	= 0x00,
	HOME	= 0x01,
	LAN	= 0x02,
	MIX	= 0x03
};
308
/* Known PHY chips: matched by PHYSID1/PHYSID2, supplies medium type and
 * board feature flags. NULL name terminates the table. */
static struct mii_chip_info {
	const char *name;
	u16 id[2];
	unsigned int type;
	u32 feature;
} mii_chip_table[] = {
	{ "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
	{ "Broadcom PHY AC131",   { 0x0143, 0xbc70 }, LAN, 0 },
	{ "Agere PHY ET1101B",    { 0x0282, 0xf010 }, LAN, 0 },
	{ "Marvell PHY 88E1111",  { 0x0141, 0x0cc0 }, LAN, F_PHY_88E1111 },
	{ "Realtek PHY RTL8201",  { 0x0000, 0x8200 }, LAN, 0 },
	{ NULL, }
};
322
/* Human-readable adapter names, indexed by driver_data from the PCI table. */
static const struct {
	const char *name;
} sis_chip_info[] = {
	{ "SiS 190 PCI Fast Ethernet adapter" },
	{ "SiS 191 PCI Gigabit Ethernet adapter" },
};
329
/* Supported devices; the last field indexes sis_chip_info. */
static struct pci_device_id sis190_pci_tbl[] __devinitdata = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
	{ 0, },
};

MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
337
/* Frames shorter than this are copied into a fresh skb on receive so the
 * large DMA buffer can be recycled (see sis190_try_rx_copy). */
static int rx_copybreak = 200;

static struct {
	u32 msg_enable;	/* -1 lets SIS190_MSG_DEFAULT apply at probe time */
} debug = { -1 };

MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver");
module_param(rx_copybreak, int, 0);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");

/* Every interrupt source the driver handles; written to IntrMask. */
static const u32 sis190_intr_mask =
	RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange;

/*
 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
 * The chips use a 64 element hash table based on the Ethernet CRC.
 */
static const int multicast_filter_limit = 32;
361
362 static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
363 {
364 unsigned int i;
365
366 SIS_W32(GMIIControl, ctl);
367
368 msleep(1);
369
370 for (i = 0; i < 100; i++) {
371 if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
372 break;
373 msleep(1);
374 }
375
376 if (i > 999)
377 printk(KERN_ERR PFX "PHY command failed !\n");
378 }
379
380 static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
381 {
382 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
383 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
384 (((u32) val) << EhnMIIdataShift));
385 }
386
387 static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
388 {
389 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
390 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));
391
392 return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
393 }
394
395 static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
396 {
397 struct sis190_private *tp = netdev_priv(dev);
398
399 mdio_write(tp->mmio_addr, phy_id, reg, val);
400 }
401
402 static int __mdio_read(struct net_device *dev, int phy_id, int reg)
403 {
404 struct sis190_private *tp = netdev_priv(dev);
405
406 return mdio_read(tp->mmio_addr, phy_id, reg);
407 }
408
/*
 * Read a PHY register twice and return the second value. Latched status
 * bits (e.g. BMSR link state) report the event since the previous read,
 * so the first read discards stale history and the second reflects the
 * current state.
 */
static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
{
	mdio_read(ioaddr, phy_id, reg);
	return mdio_read(ioaddr, phy_id, reg);
}
414
/*
 * Read one 16-bit word at EEPROM word offset @reg.
 *
 * Returns 0 when no EEPROM is present (ROMControl bit 1 clear) and
 * 0xffff when the access times out (~200 ms), otherwise the word read.
 * NOTE(review): 0 and 0xffff are also valid EEPROM contents, so callers
 * cannot distinguish those from failure here.
 */
static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
{
	u16 data = 0xffff;
	unsigned int i;

	/* Bail out early if the board has no EEPROM attached. */
	if (!(SIS_R32(ROMControl) & 0x0002))
		return 0;

	/* Kick off a read: address goes in bits 10+, EEREQ starts it. */
	SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));

	/* Hardware clears EEREQ when the word is available in the top half. */
	for (i = 0; i < 200; i++) {
		if (!(SIS_R32(ROMInterface) & EEREQ)) {
			data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
			break;
		}
		msleep(1);
	}

	return data;
}
435
/*
 * Disable all interrupt sources and acknowledge anything pending.
 * The trailing posted-write flush guarantees both writes reached the
 * chip before the caller proceeds.
 */
static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
{
	SIS_W32(IntrMask, 0x00);
	SIS_W32(IntrStatus, 0xffffffff);
	SIS_PCI_COMMIT();
}
442
/*
 * Quiesce the chip: stop both DMA engines, then mask and ack interrupts.
 * 0x1a00 keeps the DMA burst configuration bits while dropping the
 * enable bit (compare sis190_hw_start).
 */
static void sis190_asic_down(void __iomem *ioaddr)
{
	/* Stop the chip's Tx and Rx DMA processes. */

	SIS_W32(TxControl, 0x1a00);
	SIS_W32(RxControl, 0x1a00);

	sis190_irq_mask_and_ack(ioaddr);
}
452
/* Flag @desc as the final ring entry so the chip wraps back to the start. */
static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->size |= cpu_to_le32(RingEnd);
}
457
/*
 * Hand an Rx descriptor (back) to the hardware. The RingEnd marker in
 * 'size' must survive the refresh, hence it is extracted first. The wmb()
 * orders the size/PSize updates before the OWNbit flip: the chip may
 * start using the descriptor the instant it sees OWNbit set.
 */
static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->size) & RingEnd;

	desc->PSize = 0x0;
	desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
	wmb();
	desc->status = cpu_to_le32(OWNbit | INTbit);
}
467
/* Install a freshly DMA-mapped buffer in @desc and give it to the chip. */
static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				      u32 rx_buf_sz)
{
	desc->addr = cpu_to_le32(mapping);
	sis190_give_to_asic(desc, rx_buf_sz);
}
474
/*
 * Neutralize a descriptor that has no buffer behind it: zero size (keeping
 * only the RingEnd marker), poison the address, and clear OWNbit last —
 * the wmb() ensures the chip can never observe OWNbit set together with
 * the stale buffer address.
 */
static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->PSize = 0x0;
	desc->addr = 0xdeadbeef;	/* poison value, never dereferenced */
	desc->size &= cpu_to_le32(RingEnd);
	wmb();
	desc->status = 0x0;
}
483
484 static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
485 struct RxDesc *desc, u32 rx_buf_sz)
486 {
487 struct sk_buff *skb;
488 dma_addr_t mapping;
489 int ret = 0;
490
491 skb = dev_alloc_skb(rx_buf_sz);
492 if (!skb)
493 goto err_out;
494
495 *sk_buff = skb;
496
497 mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
498 PCI_DMA_FROMDEVICE);
499
500 sis190_map_to_asic(desc, mapping, rx_buf_sz);
501 out:
502 return ret;
503
504 err_out:
505 ret = -ENOMEM;
506 sis190_make_unusable_by_asic(desc);
507 goto out;
508 }
509
510 static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
511 u32 start, u32 end)
512 {
513 u32 cur;
514
515 for (cur = start; cur < end; cur++) {
516 int ret, i = cur % NUM_RX_DESC;
517
518 if (tp->Rx_skbuff[i])
519 continue;
520
521 ret = sis190_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
522 tp->RxDescRing + i, tp->rx_buf_sz);
523 if (ret < 0)
524 break;
525 }
526 return cur - start;
527 }
528
529 static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
530 struct RxDesc *desc, int rx_buf_sz)
531 {
532 int ret = -1;
533
534 if (pkt_size < rx_copybreak) {
535 struct sk_buff *skb;
536
537 skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
538 if (skb) {
539 skb_reserve(skb, NET_IP_ALIGN);
540 skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
541 *sk_buff = skb;
542 sis190_give_to_asic(desc, rx_buf_sz);
543 ret = 0;
544 }
545 }
546 return ret;
547 }
548
/*
 * Classify a received frame from its PSize status bits.
 *
 * Returns 0 for a good frame (CRC valid, no error bit set), -1 otherwise,
 * bumping the most specific error counter plus rx_errors. The else-if
 * chain means only one specific counter is charged per frame, with CRC
 * taking precedence.
 */
static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
{
#define ErrMask	(OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)

	if ((status & CRCOK) && !(status & ErrMask))
		return 0;

	if (!(status & CRCOK))
		stats->rx_crc_errors++;
	else if (status & OVRUN)
		stats->rx_over_errors++;
	else if (status & (SHORT | LIMIT))
		stats->rx_length_errors++;
	else if (status & (MIIER | NIBON | COLON))
		stats->rx_frame_errors++;

	stats->rx_errors++;
	return -1;
}
568
/*
 * Receive path, called from the interrupt handler.
 *
 * Walks the ring from cur_rx until a descriptor still owned by the chip
 * is found, delivers good frames via sis190_rx_skb (netif_rx), then
 * refills the slots consumed since dirty_rx.
 *
 * Returns the number of descriptors processed.
 */
static int sis190_rx_interrupt(struct net_device *dev,
			       struct sis190_private *tp, void __iomem *ioaddr)
{
	struct net_device_stats *stats = &tp->stats;
	u32 rx_left, cur_rx = tp->cur_rx;
	u32 delta, count;

	/* Number of slots the hardware may have filled (free-running math). */
	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
	rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);

	for (; rx_left > 0; rx_left--, cur_rx++) {
		unsigned int entry = cur_rx % NUM_RX_DESC;
		struct RxDesc *desc = tp->RxDescRing + entry;
		u32 status;

		/* Chip still owns this descriptor — nothing more to reap.
		 * NOTE(review): OWNbit is tested against the raw __le32
		 * without le32_to_cpu; harmless on little-endian hosts but
		 * looks wrong for big-endian — confirm before reuse. */
		if (desc->status & OWNbit)
			break;

		status = le32_to_cpu(desc->PSize);

		// net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
		//	 status);

		if (sis190_rx_pkt_err(status, stats) < 0)
			/* Bad frame: recycle the buffer in place. */
			sis190_give_to_asic(desc, tp->rx_buf_sz);
		else {
			struct sk_buff *skb = tp->Rx_skbuff[entry];
			/* Frame length minus the trailing 4-byte FCS. */
			int pkt_size = (status & RxSizeMask) - 4;
			/* Default: buffer stays mapped (copybreak path);
			 * switched to a full unmap when the skb is handed up. */
			void (*pci_action)(struct pci_dev *, dma_addr_t,
				size_t, int) = pci_dma_sync_single_for_device;

			if (unlikely(pkt_size > tp->rx_buf_sz)) {
				/* Frame spans several descriptors — not
				 * supported; drop and recycle. */
				net_intr(tp, KERN_INFO
					 "%s: (frag) status = %08x.\n",
					 dev->name, status);
				stats->rx_dropped++;
				stats->rx_length_errors++;
				sis190_give_to_asic(desc, tp->rx_buf_sz);
				continue;
			}

			pci_dma_sync_single_for_cpu(tp->pci_dev,
				le32_to_cpu(desc->addr), tp->rx_buf_sz,
				PCI_DMA_FROMDEVICE);

			if (sis190_try_rx_copy(&skb, pkt_size, desc,
					       tp->rx_buf_sz)) {
				/* No copy: the original skb goes up the
				 * stack, so unmap it and vacate the slot. */
				pci_action = pci_unmap_single;
				tp->Rx_skbuff[entry] = NULL;
				sis190_make_unusable_by_asic(desc);
			}

			pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
				   tp->rx_buf_sz, PCI_DMA_FROMDEVICE);

			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);

			sis190_rx_skb(skb);

			dev->last_rx = jiffies;
			stats->rx_packets++;
			stats->rx_bytes += pkt_size;
			if ((status & BCAST) == MCAST)
				stats->multicast++;
		}
	}
	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	/* Replenish the slots just consumed. */
	delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
	if (!delta && count && netif_msg_intr(tp))
		printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
	tp->dirty_rx += delta;

	if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
		printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);

	return count;
}
649
650 static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
651 struct TxDesc *desc)
652 {
653 unsigned int len;
654
655 len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
656
657 pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
658
659 memset(desc, 0x00, sizeof(*desc));
660 }
661
/*
 * Transmit completion, called from the interrupt handler.
 *
 * Reclaims descriptors the chip has released (OWNbit clear), frees the
 * skbs, and wakes the queue if it had been stopped because the ring was
 * full. The queue_stopped snapshot is taken under the same barrier
 * discipline sis190_start_xmit uses so a wake is never lost.
 */
static void sis190_tx_interrupt(struct net_device *dev,
				struct sis190_private *tp, void __iomem *ioaddr)
{
	u32 pending, dirty_tx = tp->dirty_tx;
	/*
	 * It would not be needed if queueing was allowed to be enabled
	 * again too early (hint: think preempt and unclocked smp systems).
	 */
	unsigned int queue_stopped;

	/* Pairs with the smp_wmb() after cur_tx++ in sis190_start_xmit. */
	smp_rmb();
	pending = tp->cur_tx - dirty_tx;
	queue_stopped = (pending == NUM_TX_DESC);

	for (; pending; pending--, dirty_tx++) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct TxDesc *txd = tp->TxDescRing + entry;
		struct sk_buff *skb;

		/* Chip still transmitting this one — stop reclaiming. */
		if (le32_to_cpu(txd->status) & OWNbit)
			break;

		skb = tp->Tx_skbuff[entry];

		tp->stats.tx_packets++;
		tp->stats.tx_bytes += skb->len;

		sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
		tp->Tx_skbuff[entry] = NULL;
		dev_kfree_skb_irq(skb);
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		/* Publish dirty_tx before the queue wakes (xmit re-checks it). */
		smp_wmb();
		if (queue_stopped)
			netif_wake_queue(dev);
	}
}
701
/*
 * The interrupt handler does all of the Rx thread work and cleans up after
 * the Tx thread.
 */
static irqreturn_t sis190_interrupt(int irq, void *__dev)
{
	struct net_device *dev = __dev;
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int handled = 0;
	u32 status;

	status = SIS_R32(IntrStatus);

	/* 0xffffffff usually means the device is gone (hot unplug);
	 * 0 means the interrupt was not ours (shared line). */
	if ((status == 0xffffffff) || !status)
		goto out;

	handled = 1;

	/* Interface going down: silence the chip instead of processing. */
	if (unlikely(!netif_running(dev))) {
		sis190_asic_down(ioaddr);
		goto out;
	}

	/* Ack everything we saw in one shot. */
	SIS_W32(IntrStatus, status);

//	net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);

	if (status & LinkChange) {
		net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
		/* Renegotiation sleeps; defer to process context. */
		schedule_work(&tp->phy_task);
	}

	if (status & RxQInt)
		sis190_rx_interrupt(dev, tp, ioaddr);

	if (status & TxQ0Int)
		sis190_tx_interrupt(dev, tp, ioaddr);
out:
	return IRQ_RETVAL(handled);
}
743
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling entry for netconsole/netpoll: run the interrupt handler with
 * the device's IRQ line disabled so it cannot re-enter itself.
 */
static void sis190_netpoll(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	disable_irq(pdev->irq);
	sis190_interrupt(pdev->irq, dev);
	enable_irq(pdev->irq);
}
#endif
755
756 static void sis190_free_rx_skb(struct sis190_private *tp,
757 struct sk_buff **sk_buff, struct RxDesc *desc)
758 {
759 struct pci_dev *pdev = tp->pci_dev;
760
761 pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
762 PCI_DMA_FROMDEVICE);
763 dev_kfree_skb(*sk_buff);
764 *sk_buff = NULL;
765 sis190_make_unusable_by_asic(desc);
766 }
767
768 static void sis190_rx_clear(struct sis190_private *tp)
769 {
770 unsigned int i;
771
772 for (i = 0; i < NUM_RX_DESC; i++) {
773 if (!tp->Rx_skbuff[i])
774 continue;
775 sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
776 }
777 }
778
779 static void sis190_init_ring_indexes(struct sis190_private *tp)
780 {
781 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
782 }
783
/*
 * Prepare both descriptor rings for operation: reset cursors, clear the
 * skb slot tables, fully populate the Rx ring and mark its last entry.
 *
 * Returns 0 on success, -ENOMEM (with all partial allocations undone)
 * if the Rx ring could not be completely filled.
 */
static int sis190_init_ring(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	sis190_init_ring_indexes(tp);

	memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
	memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));

	if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
		goto err_rx_clear;

	sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);

	return 0;

err_rx_clear:
	sis190_rx_clear(tp);
	return -ENOMEM;
}
804
/*
 * Program the receive filter from dev->flags and the multicast list.
 *
 * Three regimes: promiscuous (accept everything, hash filter wide open),
 * all-multi / too many groups (accept all multicast), or a 64-bit CRC
 * hash filter built from the multicast list. Registers are written under
 * tp->lock to serialize against the interrupt path.
 */
static void sis190_set_rx_mode(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;
	u32 mc_filter[2];	/* Multicast hash filter */
	u16 rx_mode;

	if (dev->flags & IFF_PROMISC) {
		rx_mode =
			AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
			AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((dev->mc_count > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct dev_mc_list *mclist;
		unsigned int i;

		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		/* Hash each group address into one of 64 filter bits. */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			int bit_nr =
				ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	spin_lock_irqsave(&tp->lock, flags);

	/* 0x2 — NOTE(review): undocumented control bit, kept verbatim. */
	SIS_W16(RxMacControl, rx_mode | 0x2);
	SIS_W32(RxHashTable, mc_filter[0]);
	SIS_W32(RxHashTable + 4, mc_filter[1]);

	spin_unlock_irqrestore(&tp->lock, flags);
}
846
/*
 * Soft-reset the MAC: pulse the reset bit in IntrControl (flushed by a
 * posted-write read and held for 1 ms), then quiesce DMA and interrupts.
 */
static void sis190_soft_reset(void __iomem *ioaddr)
{
	SIS_W32(IntrControl, 0x8000);
	SIS_PCI_COMMIT();
	msleep(1);
	SIS_W32(IntrControl, 0x0);
	sis190_asic_down(ioaddr);
	msleep(1);
}
856
/*
 * Bring the chip to an operational state: reset, install ring base
 * addresses, clear residual state, program the Rx filter, unmask
 * interrupts and finally enable both DMA engines before starting the
 * transmit queue. The magic control values are taken from the vendor
 * driver (no public datasheet).
 */
static void sis190_hw_start(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	sis190_soft_reset(ioaddr);

	SIS_W32(TxDescStartAddr, tp->tx_dma);
	SIS_W32(RxDescStartAddr, tp->rx_dma);

	SIS_W32(IntrStatus, 0xffffffff);
	SIS_W32(IntrMask, 0x0);
	SIS_W32(GMIIControl, 0x0);
	SIS_W32(TxMacControl, 0x60);
	SIS_W16(RxMacControl, 0x02);
	SIS_W32(RxHashTable, 0x0);
	SIS_W32(0x6c, 0x0);	/* undocumented register, cleared for safety */
	SIS_W32(RxWolCtrl, 0x0);
	SIS_W32(RxWolData, 0x0);

	SIS_PCI_COMMIT();

	sis190_set_rx_mode(dev);

	/* Enable all known interrupts by setting the interrupt mask. */
	SIS_W32(IntrMask, sis190_intr_mask);

	SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
	SIS_W32(RxControl, 0x1a1d);

	netif_start_queue(dev);
}
889
/*
 * Deferred PHY/link management, run from the shared workqueue (scheduled
 * by the interrupt handler on LinkChange and by the periodic timer).
 *
 * Three outcomes: PHY still resetting — repoll shortly; autonegotiation
 * not complete — kick a PHY reset and repoll later with the carrier off;
 * link up — translate the negotiated LPA/ADV intersection into a
 * StationControl configuration and turn the carrier on. Runs under
 * rtnl_lock so it cannot race ifup/ifdown.
 */
static void sis190_phy_task(struct work_struct *work)
{
	struct sis190_private *tp =
		container_of(work, struct sis190_private, phy_task);
	struct net_device *dev = tp->dev;
	void __iomem *ioaddr = tp->mmio_addr;
	int phy_id = tp->mii_if.phy_id;
	u16 val;

	rtnl_lock();

	if (!netif_running(dev))
		goto out_unlock;

	val = mdio_read(ioaddr, phy_id, MII_BMCR);
	if (val & BMCR_RESET) {
		// FIXME: needlessly high ? -- FR 02/07/2005
		mod_timer(&tp->timer, jiffies + HZ/10);
	} else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) &
		     BMSR_ANEGCOMPLETE)) {
		net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n",
			 dev->name);
		netif_carrier_off(dev);
		mdio_write(ioaddr, phy_id, MII_BMCR, val | BMCR_RESET);
		mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
	} else {
		/* Rejoice ! */
		/* Speed/duplex lookup: first entry whose 'val' bits are all
		 * present in (LPA & ADV) wins; the last entry is a catch-all. */
		struct {
			int val;
			u32 ctl;
			const char *msg;
		} reg31[] = {
			{ LPA_1000XFULL | LPA_SLCT, 0x07000c00 | 0x00001000,
				"1000 Mbps Full Duplex" },
			{ LPA_1000XHALF | LPA_SLCT, 0x07000c00,
				"1000 Mbps Half Duplex" },
			{ LPA_100FULL, 0x04000800 | 0x00001000,
				"100 Mbps Full Duplex" },
			{ LPA_100HALF, 0x04000800,
				"100 Mbps Half Duplex" },
			{ LPA_10FULL, 0x04000400 | 0x00001000,
				"10 Mbps Full Duplex" },
			{ LPA_10HALF, 0x04000400,
				"10 Mbps Half Duplex" },
			{ 0, 0x04000400, "unknown" }
		}, *p;
		u16 adv;

		val = mdio_read(ioaddr, phy_id, 0x1f);
		net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);

		val = mdio_read(ioaddr, phy_id, MII_LPA);
		adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
		net_link(tp, KERN_INFO "%s: mii lpa = %04x adv = %04x.\n",
			 dev->name, val, adv);

		/* Use only modes both ends agreed on. */
		val &= adv;

		for (p = reg31; p->val; p++) {
			if ((val & p->val) == p->val)
				break;
		}

		/* Preserve the chip bits outside the speed/duplex field. */
		p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;

		if ((tp->features & F_HAS_RGMII) &&
		    (tp->features & F_PHY_BCM5461)) {
			// Set Tx Delay in RGMII mode.
			mdio_write(ioaddr, phy_id, 0x18, 0xf1c7);
			udelay(200);
			mdio_write(ioaddr, phy_id, 0x1c, 0x8c00);
			p->ctl |= 0x03000000;
		}

		SIS_W32(StationControl, p->ctl);

		if (tp->features & F_HAS_RGMII) {
			/* Pulse the RGMII delay register. */
			SIS_W32(RGDelay, 0x0441);
			SIS_W32(RGDelay, 0x0440);
		}

		net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
			 p->msg);
		netif_carrier_on(dev);
	}

out_unlock:
	rtnl_unlock();
}
979
980 static void sis190_phy_timer(unsigned long __opaque)
981 {
982 struct net_device *dev = (struct net_device *)__opaque;
983 struct sis190_private *tp = netdev_priv(dev);
984
985 if (likely(netif_running(dev)))
986 schedule_work(&tp->phy_task);
987 }
988
/* Stop the PHY poll timer, waiting for a running callback to finish. */
static inline void sis190_delete_timer(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	del_timer_sync(&tp->timer);
}
995
/*
 * Arm the periodic PHY poll timer. The net_device is passed to the
 * callback through the timer's data field.
 */
static inline void sis190_request_timer(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct timer_list *timer = &tp->timer;

	init_timer(timer);
	timer->expires = jiffies + SIS190_PHY_TIMEOUT;
	timer->data = (unsigned long)dev;
	timer->function = sis190_phy_timer;
	add_timer(timer);
}
1007
1008 static void sis190_set_rxbufsize(struct sis190_private *tp,
1009 struct net_device *dev)
1010 {
1011 unsigned int mtu = dev->mtu;
1012
1013 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
1014 /* RxDesc->size has a licence to kill the lower bits */
1015 if (tp->rx_buf_sz & 0x07) {
1016 tp->rx_buf_sz += 8;
1017 tp->rx_buf_sz &= RX_BUF_MASK;
1018 }
1019 }
1020
/*
 * ndo_open: allocate both descriptor rings, populate the Rx ring,
 * register the (shared) IRQ handler, start the PHY timer and bring the
 * hardware up. Resources are released in reverse order on failure via
 * the numbered goto chain.
 *
 * Returns 0 on success or a negative errno.
 */
static int sis190_open(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	int rc = -ENOMEM;

	sis190_set_rxbufsize(tp, dev);

	/*
	 * Rx and Tx descriptors need 256 bytes alignment.
	 * pci_alloc_consistent() guarantees a stronger alignment.
	 */
	tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
	if (!tp->TxDescRing)
		goto out;

	tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
	if (!tp->RxDescRing)
		goto err_free_tx_0;

	rc = sis190_init_ring(dev);
	if (rc < 0)
		goto err_free_rx_1;

	INIT_WORK(&tp->phy_task, sis190_phy_task);

	sis190_request_timer(dev);

	rc = request_irq(dev->irq, sis190_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc < 0)
		goto err_release_timer_2;

	sis190_hw_start(dev);
out:
	return rc;

err_release_timer_2:
	sis190_delete_timer(dev);
	sis190_rx_clear(tp);
err_free_rx_1:
	pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
		tp->rx_dma);
err_free_tx_0:
	pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
		tp->tx_dma);
	goto out;
}
1068
1069 static void sis190_tx_clear(struct sis190_private *tp)
1070 {
1071 unsigned int i;
1072
1073 for (i = 0; i < NUM_TX_DESC; i++) {
1074 struct sk_buff *skb = tp->Tx_skbuff[i];
1075
1076 if (!skb)
1077 continue;
1078
1079 sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
1080 tp->Tx_skbuff[i] = NULL;
1081 dev_kfree_skb(skb);
1082
1083 tp->stats.tx_dropped++;
1084 }
1085 tp->cur_tx = tp->dirty_tx = 0;
1086 }
1087
/*
 * Stop the interface: kill the PHY timer, halt the queue, then bring the
 * ASIC down — repeating the shutdown until IntrMask stays zero, because
 * a concurrently running interrupt handler may re-enable sources while
 * we race with it. synchronize_irq()/synchronize_sched() wait out any
 * in-flight handler between attempts. Finally release all ring buffers.
 */
static void sis190_down(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int poll_locked = 0;

	sis190_delete_timer(dev);

	netif_stop_queue(dev);

	do {
		spin_lock_irq(&tp->lock);

		sis190_asic_down(ioaddr);

		spin_unlock_irq(&tp->lock);

		synchronize_irq(dev->irq);

		/* NOTE(review): leftover from a poll-locking scheme; the
		 * flag is set once but never read. */
		if (!poll_locked)
			poll_locked++;

		synchronize_sched();

	} while (SIS_R32(IntrMask));

	sis190_tx_clear(tp);
	sis190_rx_clear(tp);
}
1117
/*
 * ndo_stop: undo sis190_open. The device is quiesced first so the IRQ
 * and the DMA rings can be released safely afterwards.
 *
 * Always returns 0.
 */
static int sis190_close(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	sis190_down(dev);

	free_irq(dev->irq, dev);

	pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
	pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);

	tp->TxDescRing = NULL;
	tp->RxDescRing = NULL;

	return 0;
}
1135
/*
 * ndo_start_xmit: map the skb, fill the next Tx descriptor, hand it to
 * the chip and kick the Tx DMA engine.
 *
 * Returns NETDEV_TX_OK normally (including the drop-on-padding-failure
 * case) or NETDEV_TX_BUSY if the ring turned out to be full despite the
 * queue being awake. The wmb()/smp_wmb() pair orders descriptor setup
 * before the OWNbit handoff, and cur_tx publication before the Tx-done
 * path reads it.
 */
static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 len, entry, dirty_tx;
	struct TxDesc *desc;
	dma_addr_t mapping;

	/* Hardware wants at least minimum-size frames: pad short skbs. */
	if (unlikely(skb->len < ETH_ZLEN)) {
		if (skb_padto(skb, ETH_ZLEN)) {
			/* skb already freed by skb_padto on failure. */
			tp->stats.tx_dropped++;
			goto out;
		}
		len = ETH_ZLEN;
	} else {
		len = skb->len;
	}

	entry = tp->cur_tx % NUM_TX_DESC;
	desc = tp->TxDescRing + entry;

	if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
		netif_stop_queue(dev);
		net_tx_err(tp, KERN_ERR PFX
			   "%s: BUG! Tx Ring full when queue awake!\n",
			   dev->name);
		return NETDEV_TX_BUSY;
	}

	mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	tp->Tx_skbuff[entry] = skb;

	desc->PSize = cpu_to_le32(len);
	desc->addr = cpu_to_le32(mapping);

	desc->size = cpu_to_le32(len);
	if (entry == (NUM_TX_DESC - 1))
		desc->size |= cpu_to_le32(RingEnd);

	/* Descriptor contents must be visible before ownership flips. */
	wmb();

	desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);

	tp->cur_tx++;

	/* Publish cur_tx before sis190_tx_interrupt's smp_rmb(). */
	smp_wmb();

	SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);

	dev->trans_start = jiffies;

	dirty_tx = tp->dirty_tx;
	if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
		/* Ring now full; stop, then re-check in case the completion
		 * path freed slots between the test and the stop. */
		netif_stop_queue(dev);
		smp_rmb();
		if (dirty_tx != tp->dirty_tx)
			netif_wake_queue(dev);
	}
out:
	return NETDEV_TX_OK;
}
1198
1199 static struct net_device_stats *sis190_get_stats(struct net_device *dev)
1200 {
1201 struct sis190_private *tp = netdev_priv(dev);
1202
1203 return &tp->stats;
1204 }
1205
1206 static void sis190_free_phy(struct list_head *first_phy)
1207 {
1208 struct sis190_phy *cur, *next;
1209
1210 list_for_each_entry_safe(cur, next, first_phy, list) {
1211 kfree(cur);
1212 }
1213 }
1214
1215 /**
1216 * sis190_default_phy - Select default PHY for sis190 mac.
1217 * @dev: the net device to probe for
1218 *
1219 * Select first detected PHY with link as default.
1220 * If no one is link on, select PHY whose types is HOME as default.
1221 * If HOME doesn't exist, select LAN.
1222 */
1223 static u16 sis190_default_phy(struct net_device *dev)
1224 {
1225 struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
1226 struct sis190_private *tp = netdev_priv(dev);
1227 struct mii_if_info *mii_if = &tp->mii_if;
1228 void __iomem *ioaddr = tp->mmio_addr;
1229 u16 status;
1230
1231 phy_home = phy_default = phy_lan = NULL;
1232
1233 list_for_each_entry(phy, &tp->first_phy, list) {
1234 status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);
1235
1236 // Link ON & Not select default PHY & not ghost PHY.
1237 if ((status & BMSR_LSTATUS) &&
1238 !phy_default &&
1239 (phy->type != UNKNOWN)) {
1240 phy_default = phy;
1241 } else {
1242 status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
1243 mdio_write(ioaddr, phy->phy_id, MII_BMCR,
1244 status | BMCR_ANENABLE | BMCR_ISOLATE);
1245 if (phy->type == HOME)
1246 phy_home = phy;
1247 else if (phy->type == LAN)
1248 phy_lan = phy;
1249 }
1250 }
1251
1252 if (!phy_default) {
1253 if (phy_home)
1254 phy_default = phy_home;
1255 else if (phy_lan)
1256 phy_default = phy_lan;
1257 else
1258 phy_default = list_entry(&tp->first_phy,
1259 struct sis190_phy, list);
1260 }
1261
1262 if (mii_if->phy_id != phy_default->phy_id) {
1263 mii_if->phy_id = phy_default->phy_id;
1264 net_probe(tp, KERN_INFO
1265 "%s: Using transceiver at address %d as default.\n",
1266 pci_name(tp->pci_dev), mii_if->phy_id);
1267 }
1268
1269 status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
1270 status &= (~BMCR_ISOLATE);
1271
1272 mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
1273 status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);
1274
1275 return status;
1276 }
1277
1278 static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
1279 struct sis190_phy *phy, unsigned int phy_id,
1280 u16 mii_status)
1281 {
1282 void __iomem *ioaddr = tp->mmio_addr;
1283 struct mii_chip_info *p;
1284
1285 INIT_LIST_HEAD(&phy->list);
1286 phy->status = mii_status;
1287 phy->phy_id = phy_id;
1288
1289 phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
1290 phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);
1291
1292 for (p = mii_chip_table; p->type; p++) {
1293 if ((p->id[0] == phy->id[0]) &&
1294 (p->id[1] == (phy->id[1] & 0xfff0))) {
1295 break;
1296 }
1297 }
1298
1299 if (p->id[1]) {
1300 phy->type = (p->type == MIX) ?
1301 ((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
1302 LAN : HOME) : p->type;
1303 tp->features |= p->feature;
1304 } else
1305 phy->type = UNKNOWN;
1306
1307 net_probe(tp, KERN_INFO "%s: %s transceiver at address %d.\n",
1308 pci_name(tp->pci_dev),
1309 (phy->type == UNKNOWN) ? "Unknown PHY" : p->name, phy_id);
1310 }
1311
1312 static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
1313 {
1314 if (tp->features & F_PHY_88E1111) {
1315 void __iomem *ioaddr = tp->mmio_addr;
1316 int phy_id = tp->mii_if.phy_id;
1317 u16 reg[2][2] = {
1318 { 0x808b, 0x0ce1 },
1319 { 0x808f, 0x0c60 }
1320 }, *p;
1321
1322 p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1];
1323
1324 mdio_write(ioaddr, phy_id, 0x1b, p[0]);
1325 udelay(200);
1326 mdio_write(ioaddr, phy_id, 0x14, p[1]);
1327 udelay(200);
1328 }
1329 }
1330
1331 /**
1332 * sis190_mii_probe - Probe MII PHY for sis190
1333 * @dev: the net device to probe for
1334 *
1335 * Search for total of 32 possible mii phy addresses.
1336 * Identify and set current phy if found one,
1337 * return error if it failed to found.
1338 */
1339 static int __devinit sis190_mii_probe(struct net_device *dev)
1340 {
1341 struct sis190_private *tp = netdev_priv(dev);
1342 struct mii_if_info *mii_if = &tp->mii_if;
1343 void __iomem *ioaddr = tp->mmio_addr;
1344 int phy_id;
1345 int rc = 0;
1346
1347 INIT_LIST_HEAD(&tp->first_phy);
1348
1349 for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
1350 struct sis190_phy *phy;
1351 u16 status;
1352
1353 status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
1354
1355 // Try next mii if the current one is not accessible.
1356 if (status == 0xffff || status == 0x0000)
1357 continue;
1358
1359 phy = kmalloc(sizeof(*phy), GFP_KERNEL);
1360 if (!phy) {
1361 sis190_free_phy(&tp->first_phy);
1362 rc = -ENOMEM;
1363 goto out;
1364 }
1365
1366 sis190_init_phy(dev, tp, phy, phy_id, status);
1367
1368 list_add(&tp->first_phy, &phy->list);
1369 }
1370
1371 if (list_empty(&tp->first_phy)) {
1372 net_probe(tp, KERN_INFO "%s: No MII transceivers found!\n",
1373 pci_name(tp->pci_dev));
1374 rc = -EIO;
1375 goto out;
1376 }
1377
1378 /* Select default PHY for mac */
1379 sis190_default_phy(dev);
1380
1381 sis190_mii_probe_88e1111_fixup(tp);
1382
1383 mii_if->dev = dev;
1384 mii_if->mdio_read = __mdio_read;
1385 mii_if->mdio_write = __mdio_write;
1386 mii_if->phy_id_mask = PHY_ID_ANY;
1387 mii_if->reg_num_mask = MII_REG_ANY;
1388 out:
1389 return rc;
1390 }
1391
1392 static void __devexit sis190_mii_remove(struct net_device *dev)
1393 {
1394 struct sis190_private *tp = netdev_priv(dev);
1395
1396 sis190_free_phy(&tp->first_phy);
1397 }
1398
1399 static void sis190_release_board(struct pci_dev *pdev)
1400 {
1401 struct net_device *dev = pci_get_drvdata(pdev);
1402 struct sis190_private *tp = netdev_priv(dev);
1403
1404 iounmap(tp->mmio_addr);
1405 pci_release_regions(pdev);
1406 pci_disable_device(pdev);
1407 free_netdev(dev);
1408 }
1409
/*
 * Allocate the net_device, enable the PCI device, map BAR 0 and quiesce
 * the chip.  Returns the net_device on success or an ERR_PTR() code;
 * the error labels unwind resources in reverse acquisition order.
 */
static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
{
	struct sis190_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int rc;

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
		rc = -ENOMEM;
		goto err_out_0;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->dev = dev;
	tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);

	rc = pci_enable_device(pdev);
	if (rc < 0) {
		net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
		goto err_free_dev_1;
	}

	rc = -ENODEV;

	/* BAR 0 must be MMIO and large enough for the register block. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}
	if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
		net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc < 0) {
		net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}

	/* The chip only does 32-bit DMA. */
	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc < 0) {
		net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
			  pci_name(pdev));
		goto err_free_res_3;
	}

	pci_set_master(pdev);

	ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
	if (!ioaddr) {
		net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
			  pci_name(pdev));
		rc = -EIO;
		goto err_free_res_3;
	}

	tp->pci_dev = pdev;
	tp->mmio_addr = ioaddr;

	/* Silence the chip before anything else can trigger an interrupt. */
	sis190_irq_mask_and_ack(ioaddr);

	sis190_soft_reset(ioaddr);
out:
	return dev;

err_free_res_3:
	pci_release_regions(pdev);
err_pci_disable_2:
	pci_disable_device(pdev);
err_free_dev_1:
	free_netdev(dev);
err_out_0:
	dev = ERR_PTR(rc);
	goto out;
}
1493
/*
 * Watchdog handler: the transmitter stalled.  Stop Tx, mask interrupts,
 * discard every pending Tx buffer and restart the hardware from scratch.
 */
static void sis190_tx_timeout(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u8 tmp8;

	/* Disable Tx, if not already */
	tmp8 = SIS_R8(TxControl);
	if (tmp8 & CmdTxEnb)
		SIS_W8(TxControl, tmp8 & ~CmdTxEnb);


	net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n",
		   dev->name, SIS_R32(TxControl), SIS_R32(TxSts));

	/* Disable interrupts by clearing the interrupt mask. */
	SIS_W32(IntrMask, 0x0000);

	/* Stop a shared interrupt from scavenging while we are. */
	spin_lock_irq(&tp->lock);
	sis190_tx_clear(tp);
	spin_unlock_irq(&tp->lock);

	/* ...and finally, reset everything. */
	sis190_hw_start(dev);

	netif_wake_queue(dev);
}
1522
1523 static void sis190_set_rgmii(struct sis190_private *tp, u8 reg)
1524 {
1525 tp->features |= (reg & 0x80) ? F_HAS_RGMII : 0;
1526 }
1527
/*
 * Read the station address out of the on-board EEPROM into
 * dev->dev_addr.  Returns 0 on success, -EIO when the EEPROM signature
 * reads as all-ones/all-zeroes (absent or dead part).
 */
static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
						     struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u16 sig;
	int i;

	net_probe(tp, KERN_INFO "%s: Read MAC address from EEPROM\n",
		  pci_name(pdev));

	/* Check to see if there is a sane EEPROM */
	sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);

	if ((sig == 0xffff) || (sig == 0x0000)) {
		net_probe(tp, KERN_INFO "%s: Error EEPROM read %x.\n",
			  pci_name(pdev), sig);
		return -EIO;
	}

	/* Get MAC address from EEPROM */
	for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
		__le16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);

		/*
		 * NOTE(review): storing le16_to_cpu() through a native
		 * u16 view of dev_addr swaps the two MAC bytes per word on
		 * big-endian hosts - confirm against sis190_read_eeprom()'s
		 * return convention before relying on this off x86.
		 */
		((u16 *)dev->dev_addr)[i] = le16_to_cpu(w);
	}

	/* The info word also tells us whether the PHY hookup is RGMII. */
	sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo));

	return 0;
}
1559
/**
 * sis190_get_mac_addr_from_apc - Get MAC address for SiS965 model
 * @pdev: PCI device
 * @dev: network device to get address for
 *
 * SiS965 model, use APC CMOS RAM to store MAC address.
 * APC CMOS RAM is accessed through ISA bridge.
 * MAC address is read into @net_dev->dev_addr.
 *
 * Returns 0 on success, -EIO when no SiS 0x0965/0x0966 ISA bridge is found.
 */
static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
						  struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *isa_bridge;
	u8 reg, tmp8;
	int i;

	net_probe(tp, KERN_INFO "%s: Read MAC address from APC.\n",
		  pci_name(pdev));

	isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, 0x0965, NULL);
	if (!isa_bridge)
		isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, 0x0966, NULL);

	if (!isa_bridge) {
		net_probe(tp, KERN_INFO "%s: Can not find ISA bridge.\n",
			  pci_name(pdev));
		return -EIO;
	}

	/* Enable port 78h & 79h to access APC Registers. */
	pci_read_config_byte(isa_bridge, 0x48, &tmp8);
	reg = (tmp8 & ~0x02);
	pci_write_config_byte(isa_bridge, 0x48, reg);
	udelay(50);
	pci_read_config_byte(isa_bridge, 0x48, &reg);

	/* CMOS bytes 0x09..0x0e hold the station address. */
	for (i = 0; i < MAC_ADDR_LEN; i++) {
		outb(0x9 + i, 0x78);
		dev->dev_addr[i] = inb(0x79);
	}

	/* CMOS byte 0x12 carries the RGMII flag (bit 7). */
	outb(0x12, 0x78);
	reg = inb(0x79);

	sis190_set_rgmii(tp, reg);

	/* Restore the value to ISA Bridge */
	pci_write_config_byte(isa_bridge, 0x48, tmp8);
	pci_dev_put(isa_bridge);

	return 0;
}
1613
/**
 * sis190_init_rxfilter - Initialize the Rx filter
 * @dev: network device to initialize
 *
 * Set receive filter address to our MAC address
 * and enable packet filtering.
 */
static inline void sis190_init_rxfilter(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u16 ctl;
	int i;

	ctl = SIS_R16(RxMacControl);
	/*
	 * Disable packet filtering before setting filter.
	 * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits
	 * only and followed by RxMacAddr (6 bytes). Strange. -- FR
	 */
	SIS_W16(RxMacControl, ctl & ~0x0f00);

	/* Byte-wise program the 6-byte station address register. */
	for (i = 0; i < MAC_ADDR_LEN; i++)
		SIS_W8(RxMacAddr + i, dev->dev_addr[i]);

	/* Restore the original control value (filtering re-enabled). */
	SIS_W16(RxMacControl, ctl);
	SIS_PCI_COMMIT();
}
1642
1643 static int sis190_get_mac_addr(struct pci_dev *pdev, struct net_device *dev)
1644 {
1645 u8 from;
1646
1647 pci_read_config_byte(pdev, 0x73, &from);
1648
1649 return (from & 0x00000001) ?
1650 sis190_get_mac_addr_from_apc(pdev, dev) :
1651 sis190_get_mac_addr_from_eeprom(pdev, dev);
1652 }
1653
/*
 * Advertise all 10/100 modes plus 1000FD and restart autonegotiation on
 * the currently selected PHY.
 */
static void sis190_set_speed_auto(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	int phy_id = tp->mii_if.phy_id;
	int val;

	net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);

	val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);

	// Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
	// unchanged.
	mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
		   ADVERTISE_100FULL | ADVERTISE_10FULL |
		   ADVERTISE_100HALF | ADVERTISE_10HALF);

	// Enable 1000 Full Mode.
	mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);

	// Enable auto-negotiation and restart auto-negotiation.
	mdio_write(ioaddr, phy_id, MII_BMCR,
		   BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
}
1678
1679 static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1680 {
1681 struct sis190_private *tp = netdev_priv(dev);
1682
1683 return mii_ethtool_gset(&tp->mii_if, cmd);
1684 }
1685
1686 static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1687 {
1688 struct sis190_private *tp = netdev_priv(dev);
1689
1690 return mii_ethtool_sset(&tp->mii_if, cmd);
1691 }
1692
1693 static void sis190_get_drvinfo(struct net_device *dev,
1694 struct ethtool_drvinfo *info)
1695 {
1696 struct sis190_private *tp = netdev_priv(dev);
1697
1698 strcpy(info->driver, DRV_NAME);
1699 strcpy(info->version, DRV_VERSION);
1700 strcpy(info->bus_info, pci_name(tp->pci_dev));
1701 }
1702
/* ethtool: size of the register dump returned by sis190_get_regs(). */
static int sis190_get_regs_len(struct net_device *dev)
{
	return SIS190_REGS_SIZE;
}
1707
/*
 * ethtool: snapshot the MMIO register block into @p, clamped to
 * SIS190_REGS_SIZE, with the device lock held so the dump is coherent.
 */
static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			    void *p)
{
	struct sis190_private *tp = netdev_priv(dev);
	unsigned long flags;

	if (regs->len > SIS190_REGS_SIZE)
		regs->len = SIS190_REGS_SIZE;

	spin_lock_irqsave(&tp->lock, flags);
	memcpy_fromio(p, tp->mmio_addr, regs->len);
	spin_unlock_irqrestore(&tp->lock, flags);
}
1721
1722 static int sis190_nway_reset(struct net_device *dev)
1723 {
1724 struct sis190_private *tp = netdev_priv(dev);
1725
1726 return mii_nway_restart(&tp->mii_if);
1727 }
1728
1729 static u32 sis190_get_msglevel(struct net_device *dev)
1730 {
1731 struct sis190_private *tp = netdev_priv(dev);
1732
1733 return tp->msg_enable;
1734 }
1735
1736 static void sis190_set_msglevel(struct net_device *dev, u32 value)
1737 {
1738 struct sis190_private *tp = netdev_priv(dev);
1739
1740 tp->msg_enable = value;
1741 }
1742
/* ethtool operations: MII-backed link control plus a raw register dump. */
static const struct ethtool_ops sis190_ethtool_ops = {
	.get_settings	= sis190_get_settings,
	.set_settings	= sis190_set_settings,
	.get_drvinfo	= sis190_get_drvinfo,
	.get_regs_len	= sis190_get_regs_len,
	.get_regs	= sis190_get_regs,
	.get_link	= ethtool_op_get_link,
	.get_msglevel	= sis190_get_msglevel,
	.set_msglevel	= sis190_set_msglevel,
	.nway_reset	= sis190_nway_reset,
};
1754
1755 static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1756 {
1757 struct sis190_private *tp = netdev_priv(dev);
1758
1759 return !netif_running(dev) ? -EINVAL :
1760 generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
1761 }
1762
/*
 * PCI probe: bring up the board, fetch the MAC address, wire the
 * net_device operations, probe the MII bus and register the interface.
 */
static int __devinit sis190_init_one(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	/* Print the driver banner only on the first probed device. */
	static int printed_version = 0;
	struct sis190_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int rc;

	if (!printed_version) {
		net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
		printed_version = 1;
	}

	dev = sis190_init_board(pdev);
	if (IS_ERR(dev)) {
		rc = PTR_ERR(dev);
		goto out;
	}

	pci_set_drvdata(pdev, dev);

	tp = netdev_priv(dev);
	ioaddr = tp->mmio_addr;

	/* MAC address lives either in the EEPROM or in APC CMOS RAM. */
	rc = sis190_get_mac_addr(pdev, dev);
	if (rc < 0)
		goto err_release_board;

	sis190_init_rxfilter(dev);

	INIT_WORK(&tp->phy_task, sis190_phy_task);

	/* Hook up the net_device operations. */
	dev->open = sis190_open;
	dev->stop = sis190_close;
	dev->do_ioctl = sis190_ioctl;
	dev->get_stats = sis190_get_stats;
	dev->tx_timeout = sis190_tx_timeout;
	dev->watchdog_timeo = SIS190_TX_TIMEOUT;
	dev->hard_start_xmit = sis190_start_xmit;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = sis190_netpoll;
#endif
	dev->set_multicast_list = sis190_set_rx_mode;
	SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
	dev->irq = pdev->irq;
	/* No I/O port resource: poison base_addr so misuse is obvious. */
	dev->base_addr = (unsigned long) 0xdead;

	spin_lock_init(&tp->lock);

	rc = sis190_mii_probe(dev);
	if (rc < 0)
		goto err_release_board;

	rc = register_netdev(dev);
	if (rc < 0)
		goto err_remove_mii;

	net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), "
		  "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
		  pci_name(pdev), sis_chip_info[ent->driver_data].name,
		  ioaddr, dev->irq,
		  dev->dev_addr[0], dev->dev_addr[1],
		  dev->dev_addr[2], dev->dev_addr[3],
		  dev->dev_addr[4], dev->dev_addr[5]);

	net_probe(tp, KERN_INFO "%s: %s mode.\n", dev->name,
		  (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");

	/* No link yet; report carrier off and start autonegotiation. */
	netif_carrier_off(dev);

	sis190_set_speed_auto(dev);
out:
	return rc;

err_remove_mii:
	sis190_mii_remove(dev);
err_release_board:
	sis190_release_board(pdev);
	goto out;
}
1844
/* PCI remove: unwind sis190_init_one(). */
static void __devexit sis190_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	sis190_mii_remove(dev);
	/*
	 * NOTE(review): the PHY work is flushed before unregister_netdev();
	 * if the timer/irq paths can still schedule tp->phy_task at this
	 * point, the work could re-run after this flush - confirm against
	 * sis190_down()/sis190_close() ordering.
	 */
	flush_scheduled_work();
	unregister_netdev(dev);
	sis190_release_board(pdev);
	pci_set_drvdata(pdev, NULL);
}
1855
/* PCI glue: probe/remove entry points for the IDs in sis190_pci_tbl. */
static struct pci_driver sis190_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= sis190_pci_tbl,
	.probe		= sis190_init_one,
	.remove		= __devexit_p(sis190_remove_one),
};
1862
/* Module load: register with the PCI core; probing happens per device. */
static int __init sis190_init_module(void)
{
	return pci_register_driver(&sis190_pci_driver);
}
1867
/* Module unload: detach from the PCI core (removes all bound devices). */
static void __exit sis190_cleanup_module(void)
{
	pci_unregister_driver(&sis190_pci_driver);
}
1872
/* Register the module entry/exit points with the kernel. */
module_init(sis190_init_module);
module_exit(sis190_cleanup_module);