Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * forcedeth: Ethernet driver for NVIDIA nForce media access controllers. | |
3 | * | |
4 | * Note: This driver is a cleanroom reimplementation based on reverse | |
5 | * engineered documentation written by Carl-Daniel Hailfinger | |
6 | * and Andrew de Quincey. It's neither supported nor endorsed | |
7 | * by NVIDIA Corp. Use at your own risk. | |
8 | * | |
9 | * NVIDIA, nForce and other NVIDIA marks are trademarks or registered | |
10 | * trademarks of NVIDIA Corporation in the United States and other | |
11 | * countries. | |
12 | * | |
1836098f | 13 | * Copyright (C) 2003,4,5 Manfred Spraul |
1da177e4 LT |
14 | * Copyright (C) 2004 Andrew de Quincey (wol support) |
15 | * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane | |
16 | * IRQ rate fixes, bigendian fixes, cleanups, verification) | |
17 | * Copyright (c) 2004 NVIDIA Corporation | |
18 | * | |
19 | * This program is free software; you can redistribute it and/or modify | |
20 | * it under the terms of the GNU General Public License as published by | |
21 | * the Free Software Foundation; either version 2 of the License, or | |
22 | * (at your option) any later version. | |
23 | * | |
24 | * This program is distributed in the hope that it will be useful, | |
25 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
26 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
27 | * GNU General Public License for more details. | |
28 | * | |
29 | * You should have received a copy of the GNU General Public License | |
30 | * along with this program; if not, write to the Free Software | |
31 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |
32 | * | |
33 | * Changelog: | |
34 | * 0.01: 05 Oct 2003: First release that compiles without warnings. | |
35 | * 0.02: 05 Oct 2003: Fix bug for nv_drain_tx: do not try to free NULL skbs. | |
36 | * Check all PCI BARs for the register window. | |
37 | * udelay added to mii_rw. | |
38 | * 0.03: 06 Oct 2003: Initialize dev->irq. | |
39 | * 0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks. | |
40 | * 0.05: 09 Oct 2003: printk removed again, irq status print tx_timeout. | |
41 | * 0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated, | |
42 | * irq mask updated | |
43 | * 0.07: 14 Oct 2003: Further irq mask updates. | |
44 | * 0.08: 20 Oct 2003: rx_desc.Length initialization added, nv_alloc_rx refill | |
45 | * added into irq handler, NULL check for drain_ring. | |
46 | * 0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the | |
47 | * requested interrupt sources. | |
48 | * 0.10: 20 Oct 2003: First cleanup for release. | |
49 | * 0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased. | |
50 | * MAC Address init fix, set_multicast cleanup. | |
51 | * 0.12: 23 Oct 2003: Cleanups for release. | |
52 | * 0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10. | |
53 | * Set link speed correctly. start rx before starting | |
54 | * tx (nv_start_rx sets the link speed). | |
55 | * 0.14: 25 Oct 2003: Nic dependent irq mask. |
56 | * 0.15: 08 Nov 2003: fix smp deadlock with set_multicast_list during | |
57 | * open. | |
58 | * 0.16: 15 Nov 2003: include file cleanup for ppc64, rx buffer size | |
59 | * increased to 1628 bytes. | |
60 | * 0.17: 16 Nov 2003: undo rx buffer size increase. Subtract 1 from |
61 | * the tx length. | |
62 | * 0.18: 17 Nov 2003: fix oops due to late initialization of dev_stats | |
63 | * 0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac | |
64 | * addresses, really stop rx if already running | |
65 | * in nv_start_rx, clean up a bit. | |
66 | * 0.20: 07 Dec 2003: alloc fixes | |
67 | * 0.21: 12 Jan 2004: additional alloc fix, nic polling fix. | |
68 | * 0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup | |
69 | * on close. | |
70 | * 0.23: 26 Jan 2004: various small cleanups | |
71 | * 0.24: 27 Feb 2004: make driver even less anonymous in backtraces | |
72 | * 0.25: 09 Mar 2004: wol support | |
73 | * 0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes | |
74 | * 0.27: 19 Jun 2004: Gigabit support, new descriptor rings, | |
75 | * added CK804/MCP04 device IDs, code fixes | |
76 | * for registers, link status and other minor fixes. | |
77 | * 0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe | |
78 | * 0.29: 31 Aug 2004: Add backup timer for link change notification. | |
79 | * 0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset | |
80 | * into nv_close, otherwise reenabling for wol can | |
81 | * cause DMA to kfree'd memory. | |
82 | * 0.31: 14 Nov 2004: ethtool support for getting/setting link | |
4ea7f299 | 83 | * capabilities. |
22c6d143 | 84 | * 0.32: 16 Apr 2005: RX_ERROR4 handling added. |
8f767fc8 MS |
85 | * 0.33: 16 May 2005: Support for MCP51 added. |
86 | * 0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics. | |
f49d16ef | 87 | * 0.35: 26 Jun 2005: Support for MCP55 added. |
dc8216c1 MS |
88 | * 0.36: 28 Jun 2005: Add jumbo frame support. |
89 | * 0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list | |
c2dba06d MS |
90 | * 0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of |
91 | * per-packet flags. | |
4ea7f299 AA |
92 | * 0.39: 18 Jul 2005: Add 64bit descriptor support. |
93 | * 0.40: 19 Jul 2005: Add support for mac address change. | |
94 | * 0.41: 30 Jul 2005: Write back original MAC in nv_close instead | |
b3df9f81 | 95 | * of nv_remove |
4ea7f299 | 96 | * 0.42: 06 Aug 2005: Fix lack of link speed initialization |
1b1b3c9b | 97 | * in the second (and later) nv_open call |
4ea7f299 AA |
98 | * 0.43: 10 Aug 2005: Add support for tx checksum. |
99 | * 0.44: 20 Aug 2005: Add support for scatter gather and segmentation. | |
100 | * 0.45: 18 Sep 2005: Remove nv_stop/start_rx from every link check | |
a971c324 | 101 | * 0.46: 20 Oct 2005: Add irq optimization modes. |
7a33e45a | 102 | * 0.47: 26 Oct 2005: Add phyaddr 0 in phy scan. |
1836098f | 103 | * 0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single |
fa45459e | 104 | * 0.49: 10 Dec 2005: Fix tso for large buffers. |
ee407b02 | 105 | * 0.50: 20 Jan 2006: Add 8021pq tagging support. |
0832b25a | 106 | * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings. |
d33a73c8 | 107 | * 0.52: 20 Jan 2006: Add MSI/MSIX support. |
86a0f043 | 108 | * 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset. |
84b3932b | 109 | * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup. |
eb91f61b | 110 | * 0.55: 22 Mar 2006: Add flow control (pause frame). |
1da177e4 LT |
111 | * |
112 | * Known bugs: | |
113 | * We suspect that on some hardware no TX done interrupts are generated. | |
114 | * This means recovery from netif_stop_queue only happens if the hw timer | |
115 | * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT) | |
116 | * and the timer is active in the IRQMask, or if a rx packet arrives by chance. | |
117 | * If your hardware reliably generates tx done interrupts, then you can remove | |
118 | * DEV_NEED_TIMERIRQ from the driver_data flags. | |
119 | * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few | |
120 | * superfluous timer interrupts from the nic. | |
121 | */ | |
eb91f61b | 122 | #define FORCEDETH_VERSION "0.55" |
1da177e4 LT |
123 | #define DRV_NAME "forcedeth" |
124 | ||
125 | #include <linux/module.h> | |
126 | #include <linux/types.h> | |
127 | #include <linux/pci.h> | |
128 | #include <linux/interrupt.h> | |
129 | #include <linux/netdevice.h> | |
130 | #include <linux/etherdevice.h> | |
131 | #include <linux/delay.h> | |
132 | #include <linux/spinlock.h> | |
133 | #include <linux/ethtool.h> | |
134 | #include <linux/timer.h> | |
135 | #include <linux/skbuff.h> | |
136 | #include <linux/mii.h> | |
137 | #include <linux/random.h> | |
138 | #include <linux/init.h> | |
22c6d143 | 139 | #include <linux/if_vlan.h> |
910638ae | 140 | #include <linux/dma-mapping.h> |
1da177e4 LT |
141 | |
142 | #include <asm/irq.h> | |
143 | #include <asm/io.h> | |
144 | #include <asm/uaccess.h> | |
145 | #include <asm/system.h> | |
146 | ||
147 | #if 0 | |
148 | #define dprintk printk | |
149 | #else | |
150 | #define dprintk(x...) do { } while (0) | |
151 | #endif | |
152 | ||
153 | ||
154 | /* | |
155 | * Hardware access: | |
156 | */ | |
157 | ||
c2dba06d MS |
158 | #define DEV_NEED_TIMERIRQ 0x0001 /* set the timer irq flag in the irq mask */ |
159 | #define DEV_NEED_LINKTIMER 0x0002 /* poll link settings. Relies on the timer irq */ | |
160 | #define DEV_HAS_LARGEDESC 0x0004 /* device supports jumbo frames and needs packet format 2 */ | |
ee73362c | 161 | #define DEV_HAS_HIGH_DMA 0x0008 /* device supports 64bit dma */ |
8a4ae7f2 | 162 | #define DEV_HAS_CHECKSUM 0x0010 /* device supports tx and rx checksum offloads */ |
ee407b02 | 163 | #define DEV_HAS_VLAN 0x0020 /* device supports vlan tagging and stripping */ |
d33a73c8 AA |
164 | #define DEV_HAS_MSI 0x0040 /* device supports MSI */ |
165 | #define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */ | |
86a0f043 | 166 | #define DEV_HAS_POWER_CNTRL 0x0100 /* device supports power savings */ |
eb91f61b | 167 | #define DEV_HAS_PAUSEFRAME_TX 0x0200 /* device supports tx pause frames */ |
1da177e4 LT |
168 | |
169 | enum { | |
170 | NvRegIrqStatus = 0x000, | |
171 | #define NVREG_IRQSTAT_MIIEVENT 0x040 | |
172 | #define NVREG_IRQSTAT_MASK 0x1ff | |
173 | NvRegIrqMask = 0x004, | |
174 | #define NVREG_IRQ_RX_ERROR 0x0001 | |
175 | #define NVREG_IRQ_RX 0x0002 | |
176 | #define NVREG_IRQ_RX_NOBUF 0x0004 | |
177 | #define NVREG_IRQ_TX_ERR 0x0008 | |
c2dba06d | 178 | #define NVREG_IRQ_TX_OK 0x0010 |
1da177e4 LT |
179 | #define NVREG_IRQ_TIMER 0x0020 |
180 | #define NVREG_IRQ_LINK 0x0040 | |
d33a73c8 AA |
181 | #define NVREG_IRQ_RX_FORCED 0x0080 |
182 | #define NVREG_IRQ_TX_FORCED 0x0100 | |
a971c324 AA |
183 | #define NVREG_IRQMASK_THROUGHPUT 0x00df |
184 | #define NVREG_IRQMASK_CPU 0x0040 | |
d33a73c8 AA |
185 | #define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED) |
186 | #define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED) | |
187 | #define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK) | |
c2dba06d MS |
188 | |
189 | #define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \ | |
d33a73c8 AA |
190 | NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \ |
191 | NVREG_IRQ_TX_FORCED)) | |
1da177e4 LT |
192 | |
193 | NvRegUnknownSetupReg6 = 0x008, | |
194 | #define NVREG_UNKSETUP6_VAL 3 | |
195 | ||
196 | /* | |
197 | * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic | |
198 | * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms | |
199 | */ | |
200 | NvRegPollingInterval = 0x00c, | |
a971c324 AA |
201 | #define NVREG_POLL_DEFAULT_THROUGHPUT 970 |
202 | #define NVREG_POLL_DEFAULT_CPU 13 | |
d33a73c8 AA |
203 | NvRegMSIMap0 = 0x020, |
204 | NvRegMSIMap1 = 0x024, | |
205 | NvRegMSIIrqMask = 0x030, | |
206 | #define NVREG_MSI_VECTOR_0_ENABLED 0x01 | |
1da177e4 | 207 | NvRegMisc1 = 0x080, |
eb91f61b | 208 | #define NVREG_MISC1_PAUSE_TX 0x01 |
1da177e4 LT |
209 | #define NVREG_MISC1_HD 0x02 |
210 | #define NVREG_MISC1_FORCE 0x3b0f3c | |
211 | ||
86a0f043 AA |
212 | NvRegMacReset = 0x3c, |
213 | #define NVREG_MAC_RESET_ASSERT 0x0F3 | |
1da177e4 LT |
214 | NvRegTransmitterControl = 0x084, |
215 | #define NVREG_XMITCTL_START 0x01 | |
216 | NvRegTransmitterStatus = 0x088, | |
217 | #define NVREG_XMITSTAT_BUSY 0x01 | |
218 | ||
219 | NvRegPacketFilterFlags = 0x8c, | |
eb91f61b AA |
220 | #define NVREG_PFF_PAUSE_RX 0x08 |
221 | #define NVREG_PFF_ALWAYS 0x7F0000 | |
1da177e4 LT |
222 | #define NVREG_PFF_PROMISC 0x80 |
223 | #define NVREG_PFF_MYADDR 0x20 | |
224 | ||
225 | NvRegOffloadConfig = 0x90, | |
226 | #define NVREG_OFFLOAD_HOMEPHY 0x601 | |
227 | #define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE | |
228 | NvRegReceiverControl = 0x094, | |
229 | #define NVREG_RCVCTL_START 0x01 | |
230 | NvRegReceiverStatus = 0x98, | |
231 | #define NVREG_RCVSTAT_BUSY 0x01 | |
232 | ||
233 | NvRegRandomSeed = 0x9c, | |
234 | #define NVREG_RNDSEED_MASK 0x00ff | |
235 | #define NVREG_RNDSEED_FORCE 0x7f00 | |
236 | #define NVREG_RNDSEED_FORCE2 0x2d00 | |
237 | #define NVREG_RNDSEED_FORCE3 0x7400 | |
238 | ||
239 | NvRegUnknownSetupReg1 = 0xA0, | |
240 | #define NVREG_UNKSETUP1_VAL 0x16070f | |
241 | NvRegUnknownSetupReg2 = 0xA4, | |
242 | #define NVREG_UNKSETUP2_VAL 0x16 | |
243 | NvRegMacAddrA = 0xA8, | |
244 | NvRegMacAddrB = 0xAC, | |
245 | NvRegMulticastAddrA = 0xB0, | |
246 | #define NVREG_MCASTADDRA_FORCE 0x01 | |
247 | NvRegMulticastAddrB = 0xB4, | |
248 | NvRegMulticastMaskA = 0xB8, | |
249 | NvRegMulticastMaskB = 0xBC, | |
250 | ||
251 | NvRegPhyInterface = 0xC0, | |
252 | #define PHY_RGMII 0x10000000 | |
253 | ||
254 | NvRegTxRingPhysAddr = 0x100, | |
255 | NvRegRxRingPhysAddr = 0x104, | |
256 | NvRegRingSizes = 0x108, | |
257 | #define NVREG_RINGSZ_TXSHIFT 0 | |
258 | #define NVREG_RINGSZ_RXSHIFT 16 | |
259 | NvRegUnknownTransmitterReg = 0x10c, | |
260 | NvRegLinkSpeed = 0x110, | |
261 | #define NVREG_LINKSPEED_FORCE 0x10000 | |
262 | #define NVREG_LINKSPEED_10 1000 | |
263 | #define NVREG_LINKSPEED_100 100 | |
264 | #define NVREG_LINKSPEED_1000 50 | |
265 | #define NVREG_LINKSPEED_MASK (0xFFF) | |
266 | NvRegUnknownSetupReg5 = 0x130, | |
267 | #define NVREG_UNKSETUP5_BIT31 (1<<31) | |
268 | NvRegUnknownSetupReg3 = 0x13c, | |
269 | #define NVREG_UNKSETUP3_VAL1 0x200010 | |
270 | NvRegTxRxControl = 0x144, | |
271 | #define NVREG_TXRXCTL_KICK 0x0001 | |
272 | #define NVREG_TXRXCTL_BIT1 0x0002 | |
273 | #define NVREG_TXRXCTL_BIT2 0x0004 | |
274 | #define NVREG_TXRXCTL_IDLE 0x0008 | |
275 | #define NVREG_TXRXCTL_RESET 0x0010 | |
276 | #define NVREG_TXRXCTL_RXCHECK 0x0400 | |
8a4ae7f2 MS |
277 | #define NVREG_TXRXCTL_DESC_1 0 |
278 | #define NVREG_TXRXCTL_DESC_2 0x02100 | |
279 | #define NVREG_TXRXCTL_DESC_3 0x02200 | |
ee407b02 AA |
280 | #define NVREG_TXRXCTL_VLANSTRIP 0x00040 |
281 | #define NVREG_TXRXCTL_VLANINS 0x00080 | |
0832b25a AA |
282 | NvRegTxRingPhysAddrHigh = 0x148, |
283 | NvRegRxRingPhysAddrHigh = 0x14C, | |
eb91f61b AA |
284 | NvRegTxPauseFrame = 0x170, |
285 | #define NVREG_TX_PAUSEFRAME_DISABLE 0x1ff0080 | |
286 | #define NVREG_TX_PAUSEFRAME_ENABLE 0x0c00030 | |
1da177e4 LT |
287 | NvRegMIIStatus = 0x180, |
288 | #define NVREG_MIISTAT_ERROR 0x0001 | |
289 | #define NVREG_MIISTAT_LINKCHANGE 0x0008 | |
290 | #define NVREG_MIISTAT_MASK 0x000f | |
291 | #define NVREG_MIISTAT_MASK2 0x000f | |
292 | NvRegUnknownSetupReg4 = 0x184, | |
293 | #define NVREG_UNKSETUP4_VAL 8 | |
294 | ||
295 | NvRegAdapterControl = 0x188, | |
296 | #define NVREG_ADAPTCTL_START 0x02 | |
297 | #define NVREG_ADAPTCTL_LINKUP 0x04 | |
298 | #define NVREG_ADAPTCTL_PHYVALID 0x40000 | |
299 | #define NVREG_ADAPTCTL_RUNNING 0x100000 | |
300 | #define NVREG_ADAPTCTL_PHYSHIFT 24 | |
301 | NvRegMIISpeed = 0x18c, | |
302 | #define NVREG_MIISPEED_BIT8 (1<<8) | |
303 | #define NVREG_MIIDELAY 5 | |
304 | NvRegMIIControl = 0x190, | |
305 | #define NVREG_MIICTL_INUSE 0x08000 | |
306 | #define NVREG_MIICTL_WRITE 0x00400 | |
307 | #define NVREG_MIICTL_ADDRSHIFT 5 | |
308 | NvRegMIIData = 0x194, | |
309 | NvRegWakeUpFlags = 0x200, | |
310 | #define NVREG_WAKEUPFLAGS_VAL 0x7770 | |
311 | #define NVREG_WAKEUPFLAGS_BUSYSHIFT 24 | |
312 | #define NVREG_WAKEUPFLAGS_ENABLESHIFT 16 | |
313 | #define NVREG_WAKEUPFLAGS_D3SHIFT 12 | |
314 | #define NVREG_WAKEUPFLAGS_D2SHIFT 8 | |
315 | #define NVREG_WAKEUPFLAGS_D1SHIFT 4 | |
316 | #define NVREG_WAKEUPFLAGS_D0SHIFT 0 | |
317 | #define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT 0x01 | |
318 | #define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT 0x02 | |
319 | #define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE 0x04 | |
320 | #define NVREG_WAKEUPFLAGS_ENABLE 0x1111 | |
321 | ||
322 | NvRegPatternCRC = 0x204, | |
323 | NvRegPatternMask = 0x208, | |
324 | NvRegPowerCap = 0x268, | |
325 | #define NVREG_POWERCAP_D3SUPP (1<<30) | |
326 | #define NVREG_POWERCAP_D2SUPP (1<<26) | |
327 | #define NVREG_POWERCAP_D1SUPP (1<<25) | |
328 | NvRegPowerState = 0x26c, | |
329 | #define NVREG_POWERSTATE_POWEREDUP 0x8000 | |
330 | #define NVREG_POWERSTATE_VALID 0x0100 | |
331 | #define NVREG_POWERSTATE_MASK 0x0003 | |
332 | #define NVREG_POWERSTATE_D0 0x0000 | |
333 | #define NVREG_POWERSTATE_D1 0x0001 | |
334 | #define NVREG_POWERSTATE_D2 0x0002 | |
335 | #define NVREG_POWERSTATE_D3 0x0003 | |
ee407b02 AA |
336 | NvRegVlanControl = 0x300, |
337 | #define NVREG_VLANCONTROL_ENABLE 0x2000 | |
d33a73c8 AA |
338 | NvRegMSIXMap0 = 0x3e0, |
339 | NvRegMSIXMap1 = 0x3e4, | |
340 | NvRegMSIXIrqStatus = 0x3f0, | |
86a0f043 AA |
341 | |
342 | NvRegPowerState2 = 0x600, | |
343 | #define NVREG_POWERSTATE2_POWERUP_MASK 0x0F11 | |
344 | #define NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001 | |
1da177e4 LT |
345 | }; |
346 | ||
347 | /* Big endian: should work, but is untested */ | |
348 | struct ring_desc { | |
349 | u32 PacketBuffer; | |
350 | u32 FlagLen; | |
351 | }; | |
352 | ||
ee73362c MS |
353 | struct ring_desc_ex { |
354 | u32 PacketBufferHigh; | |
355 | u32 PacketBufferLow; | |
ee407b02 | 356 | u32 TxVlan; |
ee73362c MS |
357 | u32 FlagLen; |
358 | }; | |
359 | ||
360 | typedef union _ring_type { | |
361 | struct ring_desc* orig; | |
362 | struct ring_desc_ex* ex; | |
363 | } ring_type; | |
364 | ||
1da177e4 LT |
365 | #define FLAG_MASK_V1 0xffff0000 |
366 | #define FLAG_MASK_V2 0xffffc000 | |
367 | #define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1) | |
368 | #define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2) | |
369 | ||
370 | #define NV_TX_LASTPACKET (1<<16) | |
371 | #define NV_TX_RETRYERROR (1<<19) | |
c2dba06d | 372 | #define NV_TX_FORCED_INTERRUPT (1<<24) |
1da177e4 LT |
373 | #define NV_TX_DEFERRED (1<<26) |
374 | #define NV_TX_CARRIERLOST (1<<27) | |
375 | #define NV_TX_LATECOLLISION (1<<28) | |
376 | #define NV_TX_UNDERFLOW (1<<29) | |
377 | #define NV_TX_ERROR (1<<30) | |
378 | #define NV_TX_VALID (1<<31) | |
379 | ||
380 | #define NV_TX2_LASTPACKET (1<<29) | |
381 | #define NV_TX2_RETRYERROR (1<<18) | |
c2dba06d | 382 | #define NV_TX2_FORCED_INTERRUPT (1<<30) |
1da177e4 LT |
383 | #define NV_TX2_DEFERRED (1<<25) |
384 | #define NV_TX2_CARRIERLOST (1<<26) | |
385 | #define NV_TX2_LATECOLLISION (1<<27) | |
386 | #define NV_TX2_UNDERFLOW (1<<28) | |
387 | /* error and valid are the same for both */ | |
388 | #define NV_TX2_ERROR (1<<30) | |
389 | #define NV_TX2_VALID (1<<31) | |
ac9c1897 AA |
390 | #define NV_TX2_TSO (1<<28) |
391 | #define NV_TX2_TSO_SHIFT 14 | |
fa45459e AA |
392 | #define NV_TX2_TSO_MAX_SHIFT 14 |
393 | #define NV_TX2_TSO_MAX_SIZE (1<<NV_TX2_TSO_MAX_SHIFT) | |
8a4ae7f2 MS |
394 | #define NV_TX2_CHECKSUM_L3 (1<<27) |
395 | #define NV_TX2_CHECKSUM_L4 (1<<26) | |
1da177e4 | 396 | |
ee407b02 AA |
397 | #define NV_TX3_VLAN_TAG_PRESENT (1<<18) |
398 | ||
1da177e4 LT |
399 | #define NV_RX_DESCRIPTORVALID (1<<16) |
400 | #define NV_RX_MISSEDFRAME (1<<17) | |
401 | #define NV_RX_SUBSTRACT1 (1<<18) | |
402 | #define NV_RX_ERROR1 (1<<23) | |
403 | #define NV_RX_ERROR2 (1<<24) | |
404 | #define NV_RX_ERROR3 (1<<25) | |
405 | #define NV_RX_ERROR4 (1<<26) | |
406 | #define NV_RX_CRCERR (1<<27) | |
407 | #define NV_RX_OVERFLOW (1<<28) | |
408 | #define NV_RX_FRAMINGERR (1<<29) | |
409 | #define NV_RX_ERROR (1<<30) | |
410 | #define NV_RX_AVAIL (1<<31) | |
411 | ||
412 | #define NV_RX2_CHECKSUMMASK (0x1C000000) | |
413 | #define NV_RX2_CHECKSUMOK1 (0x10000000) | |
414 | #define NV_RX2_CHECKSUMOK2 (0x14000000) | |
415 | #define NV_RX2_CHECKSUMOK3 (0x18000000) | |
416 | #define NV_RX2_DESCRIPTORVALID (1<<29) | |
417 | #define NV_RX2_SUBSTRACT1 (1<<25) | |
418 | #define NV_RX2_ERROR1 (1<<18) | |
419 | #define NV_RX2_ERROR2 (1<<19) | |
420 | #define NV_RX2_ERROR3 (1<<20) | |
421 | #define NV_RX2_ERROR4 (1<<21) | |
422 | #define NV_RX2_CRCERR (1<<22) | |
423 | #define NV_RX2_OVERFLOW (1<<23) | |
424 | #define NV_RX2_FRAMINGERR (1<<24) | |
425 | /* error and avail are the same for both */ | |
426 | #define NV_RX2_ERROR (1<<30) | |
427 | #define NV_RX2_AVAIL (1<<31) | |
428 | ||
ee407b02 AA |
429 | #define NV_RX3_VLAN_TAG_PRESENT (1<<16) |
430 | #define NV_RX3_VLAN_TAG_MASK (0x0000FFFF) | |
431 | ||
1da177e4 | 432 | /* Miscellaneous hardware-related defines: */ |
86a0f043 AA |
433 | #define NV_PCI_REGSZ_VER1 0x270 |
434 | #define NV_PCI_REGSZ_VER2 0x604 | |
1da177e4 LT |
435 | |
436 | /* various timeout delays: all in usec */ | |
437 | #define NV_TXRX_RESET_DELAY 4 | |
438 | #define NV_TXSTOP_DELAY1 10 | |
439 | #define NV_TXSTOP_DELAY1MAX 500000 | |
440 | #define NV_TXSTOP_DELAY2 100 | |
441 | #define NV_RXSTOP_DELAY1 10 | |
442 | #define NV_RXSTOP_DELAY1MAX 500000 | |
443 | #define NV_RXSTOP_DELAY2 100 | |
444 | #define NV_SETUP5_DELAY 5 | |
445 | #define NV_SETUP5_DELAYMAX 50000 | |
446 | #define NV_POWERUP_DELAY 5 | |
447 | #define NV_POWERUP_DELAYMAX 5000 | |
448 | #define NV_MIIBUSY_DELAY 50 | |
449 | #define NV_MIIPHY_DELAY 10 | |
450 | #define NV_MIIPHY_DELAYMAX 10000 | |
86a0f043 | 451 | #define NV_MAC_RESET_DELAY 64 |
1da177e4 LT |
452 | |
453 | #define NV_WAKEUPPATTERNS 5 | |
454 | #define NV_WAKEUPMASKENTRIES 4 | |
455 | ||
456 | /* General driver defaults */ | |
457 | #define NV_WATCHDOG_TIMEO (5*HZ) | |
458 | ||
eafa59f6 AA |
459 | #define RX_RING_DEFAULT 128 |
460 | #define TX_RING_DEFAULT 256 | |
461 | #define RX_RING_MIN 128 | |
462 | #define TX_RING_MIN 64 | |
463 | #define RING_MAX_DESC_VER_1 1024 | |
464 | #define RING_MAX_DESC_VER_2_3 16384 | |
f3b197ac | 465 | /* |
eafa59f6 AA |
466 | * Difference between the get and put pointers for the tx ring. |
467 | * This is used to throttle the amount of data outstanding in the | |
468 | * tx ring. | |
1da177e4 | 469 | */ |
eafa59f6 | 470 | #define TX_LIMIT_DIFFERENCE 1 |
1da177e4 LT |
471 | |
472 | /* rx/tx mac addr + type + vlan + align + slack */ |
d81c0983 MS |
473 | #define NV_RX_HEADERS (64) |
474 | /* even more slack. */ | |
475 | #define NV_RX_ALLOC_PAD (64) | |
476 | ||
477 | /* maximum mtu size */ | |
478 | #define NV_PKTLIMIT_1 ETH_DATA_LEN /* hard limit not known */ | |
479 | #define NV_PKTLIMIT_2 9100 /* Actual limit according to NVidia: 9202 */ | |
1da177e4 LT |
480 | |
481 | #define OOM_REFILL (1+HZ/20) | |
482 | #define POLL_WAIT (1+HZ/100) | |
483 | #define LINK_TIMEOUT (3*HZ) | |
484 | ||
f3b197ac | 485 | /* |
1da177e4 | 486 | * desc_ver values: |
8a4ae7f2 MS |
487 | * The nic supports three different descriptor types: |
488 | * - DESC_VER_1: Original | |
489 | * - DESC_VER_2: support for jumbo frames. | |
490 | * - DESC_VER_3: 64-bit format. | |
1da177e4 | 491 | */ |
8a4ae7f2 MS |
492 | #define DESC_VER_1 1 |
493 | #define DESC_VER_2 2 | |
494 | #define DESC_VER_3 3 | |
1da177e4 LT |
495 | |
496 | /* PHY defines */ | |
497 | #define PHY_OUI_MARVELL 0x5043 | |
498 | #define PHY_OUI_CICADA 0x03f1 | |
499 | #define PHYID1_OUI_MASK 0x03ff | |
500 | #define PHYID1_OUI_SHFT 6 | |
501 | #define PHYID2_OUI_MASK 0xfc00 | |
502 | #define PHYID2_OUI_SHFT 10 | |
503 | #define PHY_INIT1 0x0f000 | |
504 | #define PHY_INIT2 0x0e00 | |
505 | #define PHY_INIT3 0x01000 | |
506 | #define PHY_INIT4 0x0200 | |
507 | #define PHY_INIT5 0x0004 | |
508 | #define PHY_INIT6 0x02000 | |
509 | #define PHY_GIGABIT 0x0100 | |
510 | ||
511 | #define PHY_TIMEOUT 0x1 | |
512 | #define PHY_ERROR 0x2 | |
513 | ||
514 | #define PHY_100 0x1 | |
515 | #define PHY_1000 0x2 | |
516 | #define PHY_HALF 0x100 | |
517 | ||
eb91f61b AA |
518 | #define NV_PAUSEFRAME_RX_CAPABLE 0x0001 |
519 | #define NV_PAUSEFRAME_TX_CAPABLE 0x0002 | |
520 | #define NV_PAUSEFRAME_RX_ENABLE 0x0004 | |
521 | #define NV_PAUSEFRAME_TX_ENABLE 0x0008 | |
b6d0773f AA |
522 | #define NV_PAUSEFRAME_RX_REQ 0x0010 |
523 | #define NV_PAUSEFRAME_TX_REQ 0x0020 | |
524 | #define NV_PAUSEFRAME_AUTONEG 0x0040 | |
1da177e4 | 525 | |
d33a73c8 AA |
526 | /* MSI/MSI-X defines */ |
527 | #define NV_MSI_X_MAX_VECTORS 8 | |
528 | #define NV_MSI_X_VECTORS_MASK 0x000f | |
529 | #define NV_MSI_CAPABLE 0x0010 | |
530 | #define NV_MSI_X_CAPABLE 0x0020 | |
531 | #define NV_MSI_ENABLED 0x0040 | |
532 | #define NV_MSI_X_ENABLED 0x0080 | |
533 | ||
534 | #define NV_MSI_X_VECTOR_ALL 0x0 | |
535 | #define NV_MSI_X_VECTOR_RX 0x0 | |
536 | #define NV_MSI_X_VECTOR_TX 0x1 | |
537 | #define NV_MSI_X_VECTOR_OTHER 0x2 | |
1da177e4 LT |
538 | |
539 | /* | |
540 | * SMP locking: | |
541 | * All hardware access under dev->priv->lock, except the performance | |
542 | * critical parts: | |
543 | * - rx is (pseudo-) lockless: it relies on the single-threading provided | |
544 | * by the arch code for interrupts. | |
545 | * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission | |
546 | * needs dev->priv->lock :-( | |
547 | * - set_multicast_list: preparation lockless, relies on dev->xmit_lock. | |
548 | */ | |
549 | ||
550 | /* in dev: base, irq */ | |
551 | struct fe_priv { | |
552 | spinlock_t lock; | |
553 | ||
554 | /* General data: | |
555 | * Locking: spin_lock(&np->lock); */ | |
556 | struct net_device_stats stats; | |
557 | int in_shutdown; | |
558 | u32 linkspeed; | |
559 | int duplex; | |
560 | int autoneg; | |
561 | int fixed_mode; | |
562 | int phyaddr; | |
563 | int wolenabled; | |
564 | unsigned int phy_oui; | |
565 | u16 gigabit; | |
566 | ||
567 | /* General data: RO fields */ | |
568 | dma_addr_t ring_addr; | |
569 | struct pci_dev *pci_dev; | |
570 | u32 orig_mac[2]; | |
571 | u32 irqmask; | |
572 | u32 desc_ver; | |
8a4ae7f2 | 573 | u32 txrxctl_bits; |
ee407b02 | 574 | u32 vlanctl_bits; |
86a0f043 AA |
575 | u32 driver_data; |
576 | u32 register_size; | |
1da177e4 LT |
577 | |
578 | void __iomem *base; | |
579 | ||
580 | /* rx specific fields. | |
581 | * Locking: Within irq handler or disable_irq+spin_lock(&np->lock); |
582 | */ | |
ee73362c | 583 | ring_type rx_ring; |
1da177e4 | 584 | unsigned int cur_rx, refill_rx; |
eafa59f6 AA |
585 | struct sk_buff **rx_skbuff; |
586 | dma_addr_t *rx_dma; | |
1da177e4 | 587 | unsigned int rx_buf_sz; |
d81c0983 | 588 | unsigned int pkt_limit; |
1da177e4 LT |
589 | struct timer_list oom_kick; |
590 | struct timer_list nic_poll; | |
d33a73c8 | 591 | u32 nic_poll_irq; |
eafa59f6 | 592 | int rx_ring_size; |
1da177e4 LT |
593 | |
594 | /* media detection workaround. | |
595 | * Locking: Within irq handler or disable_irq+spin_lock(&np->lock); |
596 | */ | |
597 | int need_linktimer; | |
598 | unsigned long link_timeout; | |
599 | /* | |
600 | * tx specific fields. | |
601 | */ | |
ee73362c | 602 | ring_type tx_ring; |
1da177e4 | 603 | unsigned int next_tx, nic_tx; |
eafa59f6 AA |
604 | struct sk_buff **tx_skbuff; |
605 | dma_addr_t *tx_dma; | |
606 | unsigned int *tx_dma_len; | |
1da177e4 | 607 | u32 tx_flags; |
eafa59f6 AA |
608 | int tx_ring_size; |
609 | int tx_limit_start; | |
610 | int tx_limit_stop; | |
ee407b02 AA |
611 | |
612 | /* vlan fields */ | |
613 | struct vlan_group *vlangrp; | |
d33a73c8 AA |
614 | |
615 | /* msi/msi-x fields */ | |
616 | u32 msi_flags; | |
617 | struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS]; | |
eb91f61b AA |
618 | |
619 | /* flow control */ | |
620 | u32 pause_flags; | |
1da177e4 LT |
621 | }; |
622 | ||
623 | /* | |
624 | * Maximum number of loops until we assume that a bit in the irq mask | |
625 | * is stuck. Overridable with module param. | |
626 | */ | |
627 | static int max_interrupt_work = 5; | |
628 | ||
a971c324 AA |
629 | /* |
630 | * Optimization can be either throughput mode or cpu mode |
f3b197ac | 631 | * |
a971c324 AA |
632 | * Throughput Mode: Every tx and rx packet will generate an interrupt. |
633 | * CPU Mode: Interrupts are controlled by a timer. | |
634 | */ | |
635 | #define NV_OPTIMIZATION_MODE_THROUGHPUT 0 | |
636 | #define NV_OPTIMIZATION_MODE_CPU 1 | |
637 | static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT; | |
638 | ||
639 | /* | |
640 | * Poll interval for timer irq | |
641 | * | |
642 | * This interval determines how frequently an interrupt is generated. |
643 | * The value is determined by [(time_in_micro_secs * 100) / (2^10)] |
644 | * Min = 0, and Max = 65535 | |
645 | */ | |
646 | static int poll_interval = -1; | |
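As a quick sanity check of the formula quoted above (a minimal sketch, not part of the driver; the helper name below is hypothetical): 1000 µs gives (1000 * 100) / 1024 ≈ 97, matching the earlier note that NVREG_POLL_DEFAULT = 97 yields roughly a 1 ms interval, while the throughput default of 970 corresponds to roughly 9.9 ms, i.e. about 100 timer interrupts per second as described under "Known bugs".

```c
/*
 * Hypothetical helper (illustration only, not part of forcedeth):
 * convert a desired timer interval in microseconds into the value
 * written to NvRegPollingInterval, following the comment above:
 * value = (time_in_micro_secs * 100) / 2^10.
 */
static inline u32 nv_usec_to_poll_interval(u32 usec)
{
	return (usec * 100) >> 10;	/* e.g. 1000 usec -> 97 */
}
```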
647 | ||
d33a73c8 AA |
648 | /* |
649 | * Disable MSI interrupts | |
650 | */ | |
651 | static int disable_msi = 0; | |
652 | ||
653 | /* | |
654 | * Disable MSIX interrupts | |
655 | */ | |
656 | static int disable_msix = 0; | |
657 | ||
1da177e4 LT |
658 | static inline struct fe_priv *get_nvpriv(struct net_device *dev) |
659 | { | |
660 | return netdev_priv(dev); | |
661 | } | |
662 | ||
663 | static inline u8 __iomem *get_hwbase(struct net_device *dev) | |
664 | { | |
ac9c1897 | 665 | return ((struct fe_priv *)netdev_priv(dev))->base; |
1da177e4 LT |
666 | } |
667 | ||
668 | static inline void pci_push(u8 __iomem *base) | |
669 | { | |
670 | /* force out pending posted writes */ | |
671 | readl(base); | |
672 | } | |
673 | ||
674 | static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v) | |
675 | { | |
676 | return le32_to_cpu(prd->FlagLen) | |
677 | & ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2); | |
678 | } | |
679 | ||
ee73362c MS |
680 | static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v) |
681 | { | |
682 | return le32_to_cpu(prd->FlagLen) & LEN_MASK_V2; | |
683 | } | |
684 | ||
1da177e4 LT |
685 | static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target, |
686 | int delay, int delaymax, const char *msg) | |
687 | { | |
688 | u8 __iomem *base = get_hwbase(dev); | |
689 | ||
690 | pci_push(base); | |
691 | do { | |
692 | udelay(delay); | |
693 | delaymax -= delay; | |
694 | if (delaymax < 0) { | |
695 | if (msg) | |
696 | printk(msg); | |
697 | return 1; | |
698 | } | |
699 | } while ((readl(base + offset) & mask) != target); | |
700 | return 0; | |
701 | } | |
702 | ||
0832b25a AA |
703 | #define NV_SETUP_RX_RING 0x01 |
704 | #define NV_SETUP_TX_RING 0x02 | |
705 | ||
706 | static void setup_hw_rings(struct net_device *dev, int rxtx_flags) | |
707 | { | |
708 | struct fe_priv *np = get_nvpriv(dev); | |
709 | u8 __iomem *base = get_hwbase(dev); | |
710 | ||
711 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | |
712 | if (rxtx_flags & NV_SETUP_RX_RING) { | |
713 | writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr); | |
714 | } | |
715 | if (rxtx_flags & NV_SETUP_TX_RING) { | |
eafa59f6 | 716 | writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr); |
0832b25a AA |
717 | } |
718 | } else { | |
719 | if (rxtx_flags & NV_SETUP_RX_RING) { | |
720 | writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr); | |
721 | writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh); | |
722 | } | |
723 | if (rxtx_flags & NV_SETUP_TX_RING) { | |
eafa59f6 AA |
724 | writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr); |
725 | writel((u32) (cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh); | |
0832b25a AA |
726 | } |
727 | } | |
728 | } | |
729 | ||
eafa59f6 AA |
730 | static void free_rings(struct net_device *dev) |
731 | { | |
732 | struct fe_priv *np = get_nvpriv(dev); | |
733 | ||
734 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | |
735 | if(np->rx_ring.orig) | |
736 | pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), | |
737 | np->rx_ring.orig, np->ring_addr); | |
738 | } else { | |
739 | if (np->rx_ring.ex) | |
740 | pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), | |
741 | np->rx_ring.ex, np->ring_addr); | |
742 | } | |
743 | if (np->rx_skbuff) | |
744 | kfree(np->rx_skbuff); | |
745 | if (np->rx_dma) | |
746 | kfree(np->rx_dma); | |
747 | if (np->tx_skbuff) | |
748 | kfree(np->tx_skbuff); | |
749 | if (np->tx_dma) | |
750 | kfree(np->tx_dma); | |
751 | if (np->tx_dma_len) | |
752 | kfree(np->tx_dma_len); | |
753 | } | |
754 | ||
84b3932b AA |
755 | static int using_multi_irqs(struct net_device *dev) |
756 | { | |
757 | struct fe_priv *np = get_nvpriv(dev); | |
758 | ||
759 | if (!(np->msi_flags & NV_MSI_X_ENABLED) || | |
760 | ((np->msi_flags & NV_MSI_X_ENABLED) && | |
761 | ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) | |
762 | return 0; | |
763 | else | |
764 | return 1; | |
765 | } | |
766 | ||
767 | static void nv_enable_irq(struct net_device *dev) | |
768 | { | |
769 | struct fe_priv *np = get_nvpriv(dev); | |
770 | ||
771 | if (!using_multi_irqs(dev)) { | |
772 | if (np->msi_flags & NV_MSI_X_ENABLED) | |
773 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); | |
774 | else | |
775 | enable_irq(dev->irq); | |
776 | } else { | |
777 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); | |
778 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); | |
779 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); | |
780 | } | |
781 | } | |
782 | ||
783 | static void nv_disable_irq(struct net_device *dev) | |
784 | { | |
785 | struct fe_priv *np = get_nvpriv(dev); | |
786 | ||
787 | if (!using_multi_irqs(dev)) { | |
788 | if (np->msi_flags & NV_MSI_X_ENABLED) | |
789 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); | |
790 | else | |
791 | disable_irq(dev->irq); | |
792 | } else { | |
793 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); | |
794 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); | |
795 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); | |
796 | } | |
797 | } | |
798 | ||
799 | /* In MSIX mode, a write to irqmask behaves as XOR */ | |
800 | static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask) | |
801 | { | |
802 | u8 __iomem *base = get_hwbase(dev); | |
803 | ||
804 | writel(mask, base + NvRegIrqMask); | |
805 | } | |
806 | ||
807 | static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask) | |
808 | { | |
809 | struct fe_priv *np = get_nvpriv(dev); | |
810 | u8 __iomem *base = get_hwbase(dev); | |
811 | ||
812 | if (np->msi_flags & NV_MSI_X_ENABLED) { | |
813 | writel(mask, base + NvRegIrqMask); | |
814 | } else { | |
815 | if (np->msi_flags & NV_MSI_ENABLED) | |
816 | writel(0, base + NvRegMSIIrqMask); | |
817 | writel(0, base + NvRegIrqMask); | |
818 | } | |
819 | } | |
820 | ||
1da177e4 LT |
821 | #define MII_READ (-1) |
822 | /* mii_rw: read/write a register on the PHY. | |
823 | * | |
824 | * Caller must guarantee serialization | |
825 | */ | |
826 | static int mii_rw(struct net_device *dev, int addr, int miireg, int value) | |
827 | { | |
828 | u8 __iomem *base = get_hwbase(dev); | |
829 | u32 reg; | |
830 | int retval; | |
831 | ||
832 | writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); | |
833 | ||
834 | reg = readl(base + NvRegMIIControl); | |
835 | if (reg & NVREG_MIICTL_INUSE) { | |
836 | writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl); | |
837 | udelay(NV_MIIBUSY_DELAY); | |
838 | } | |
839 | ||
840 | reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg; | |
841 | if (value != MII_READ) { | |
842 | writel(value, base + NvRegMIIData); | |
843 | reg |= NVREG_MIICTL_WRITE; | |
844 | } | |
845 | writel(reg, base + NvRegMIIControl); | |
846 | ||
847 | if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0, | |
848 | NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) { | |
849 | dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n", | |
850 | dev->name, miireg, addr); | |
851 | retval = -1; | |
852 | } else if (value != MII_READ) { | |
853 | /* it was a write operation - fewer failures are detectable */ | |
854 | dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n", | |
855 | dev->name, value, miireg, addr); | |
856 | retval = 0; | |
857 | } else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) { | |
858 | dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n", | |
859 | dev->name, miireg, addr); | |
860 | retval = -1; | |
861 | } else { | |
862 | retval = readl(base + NvRegMIIData); | |
863 | dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n", | |
864 | dev->name, miireg, addr, retval); | |
865 | } | |
866 | ||
867 | return retval; | |
868 | } | |
869 | ||
870 | static int phy_reset(struct net_device *dev) | |
871 | { | |
ac9c1897 | 872 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 LT |
873 | u32 miicontrol; |
874 | unsigned int tries = 0; | |
875 | ||
876 | miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); | |
877 | miicontrol |= BMCR_RESET; | |
878 | if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) { | |
879 | return -1; | |
880 | } | |
881 | ||
882 | /* wait for 500ms */ | |
883 | msleep(500); | |
884 | ||
885 | /* must wait till reset is deasserted */ | |
886 | while (miicontrol & BMCR_RESET) { | |
887 | msleep(10); | |
888 | miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); | |
889 | /* FIXME: 100 tries seem excessive */ | |
890 | if (tries++ > 100) | |
891 | return -1; | |
892 | } | |
893 | return 0; | |
894 | } | |
895 | ||
896 | static int phy_init(struct net_device *dev) | |
897 | { | |
898 | struct fe_priv *np = get_nvpriv(dev); | |
899 | u8 __iomem *base = get_hwbase(dev); | |
900 | u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg; | |
901 | ||
902 | /* set advertise register */ | |
903 | reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); | |
eb91f61b | 904 | reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP); |
1da177e4 LT |
905 | if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) { |
906 | printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev)); | |
907 | return PHY_ERROR; | |
908 | } | |
909 | ||
910 | /* get phy interface type */ | |
911 | phyinterface = readl(base + NvRegPhyInterface); | |
912 | ||
913 | /* see if gigabit phy */ | |
914 | mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); | |
915 | if (mii_status & PHY_GIGABIT) { | |
916 | np->gigabit = PHY_GIGABIT; | |
eb91f61b | 917 | mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); |
1da177e4 LT |
918 | mii_control_1000 &= ~ADVERTISE_1000HALF; |
919 | if (phyinterface & PHY_RGMII) | |
920 | mii_control_1000 |= ADVERTISE_1000FULL; | |
921 | else | |
922 | mii_control_1000 &= ~ADVERTISE_1000FULL; | |
923 | ||
eb91f61b | 924 | if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) { |
1da177e4 LT |
925 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); |
926 | return PHY_ERROR; | |
927 | } | |
928 | } | |
929 | else | |
930 | np->gigabit = 0; | |
931 | ||
932 | /* reset the phy */ | |
933 | if (phy_reset(dev)) { | |
934 | printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev)); | |
935 | return PHY_ERROR; | |
936 | } | |
937 | ||
938 | /* phy vendor specific configuration */ | |
939 | if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) { | |
940 | phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ); | |
941 | phy_reserved &= ~(PHY_INIT1 | PHY_INIT2); | |
942 | phy_reserved |= (PHY_INIT3 | PHY_INIT4); | |
943 | if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) { | |
944 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); | |
945 | return PHY_ERROR; | |
946 | } | |
947 | phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ); | |
948 | phy_reserved |= PHY_INIT5; | |
949 | if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) { | |
950 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); | |
951 | return PHY_ERROR; | |
952 | } | |
953 | } | |
954 | if (np->phy_oui == PHY_OUI_CICADA) { | |
955 | phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ); | |
956 | phy_reserved |= PHY_INIT6; | |
957 | if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) { | |
958 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); | |
959 | return PHY_ERROR; | |
960 | } | |
961 | } | |
eb91f61b AA |
962 | /* some phys clear out pause advertisement on reset, set it back */ |
963 | mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg); | |
1da177e4 LT |
964 | |
965 | /* restart auto negotiation */ | |
966 | mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); | |
967 | mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE); | |
968 | if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) { | |
969 | return PHY_ERROR; | |
970 | } | |
971 | ||
972 | return 0; | |
973 | } | |
974 | ||
975 | static void nv_start_rx(struct net_device *dev) | |
976 | { | |
ac9c1897 | 977 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 LT |
978 | u8 __iomem *base = get_hwbase(dev); |
979 | ||
980 | dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name); | |
981 | /* Already running? Stop it. */ | |
982 | if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) { | |
983 | writel(0, base + NvRegReceiverControl); | |
984 | pci_push(base); | |
985 | } | |
986 | writel(np->linkspeed, base + NvRegLinkSpeed); | |
987 | pci_push(base); | |
988 | writel(NVREG_RCVCTL_START, base + NvRegReceiverControl); | |
989 | dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n", | |
990 | dev->name, np->duplex, np->linkspeed); | |
991 | pci_push(base); | |
992 | } | |
993 | ||
994 | static void nv_stop_rx(struct net_device *dev) | |
995 | { | |
996 | u8 __iomem *base = get_hwbase(dev); | |
997 | ||
998 | dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name); | |
999 | writel(0, base + NvRegReceiverControl); | |
1000 | reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0, | |
1001 | NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX, | |
1002 | KERN_INFO "nv_stop_rx: ReceiverStatus remained busy"); | |
1003 | ||
1004 | udelay(NV_RXSTOP_DELAY2); | |
1005 | writel(0, base + NvRegLinkSpeed); | |
1006 | } | |
1007 | ||
1008 | static void nv_start_tx(struct net_device *dev) | |
1009 | { | |
1010 | u8 __iomem *base = get_hwbase(dev); | |
1011 | ||
1012 | dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name); | |
1013 | writel(NVREG_XMITCTL_START, base + NvRegTransmitterControl); | |
1014 | pci_push(base); | |
1015 | } | |
1016 | ||
1017 | static void nv_stop_tx(struct net_device *dev) | |
1018 | { | |
1019 | u8 __iomem *base = get_hwbase(dev); | |
1020 | ||
1021 | dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name); | |
1022 | writel(0, base + NvRegTransmitterControl); | |
1023 | reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0, | |
1024 | NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX, | |
1025 | KERN_INFO "nv_stop_tx: TransmitterStatus remained busy"); | |
1026 | ||
1027 | udelay(NV_TXSTOP_DELAY2); | |
1028 | writel(0, base + NvRegUnknownTransmitterReg); | |
1029 | } | |
1030 | ||
1031 | static void nv_txrx_reset(struct net_device *dev) | |
1032 | { | |
ac9c1897 | 1033 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 LT |
1034 | u8 __iomem *base = get_hwbase(dev); |
1035 | ||
1036 | dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name); | |
8a4ae7f2 | 1037 | writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); |
1da177e4 LT |
1038 | pci_push(base); |
1039 | udelay(NV_TXRX_RESET_DELAY); | |
8a4ae7f2 | 1040 | writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl); |
1da177e4 LT |
1041 | pci_push(base); |
1042 | } | |
1043 | ||
86a0f043 AA |
1044 | static void nv_mac_reset(struct net_device *dev) |
1045 | { | |
1046 | struct fe_priv *np = netdev_priv(dev); | |
1047 | u8 __iomem *base = get_hwbase(dev); | |
1048 | ||
1049 | dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name); | |
1050 | writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); | |
1051 | pci_push(base); | |
1052 | writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset); | |
1053 | pci_push(base); | |
1054 | udelay(NV_MAC_RESET_DELAY); | |
1055 | writel(0, base + NvRegMacReset); | |
1056 | pci_push(base); | |
1057 | udelay(NV_MAC_RESET_DELAY); | |
1058 | writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl); | |
1059 | pci_push(base); | |
1060 | } | |
1061 | ||
1da177e4 LT |
1062 | /* |
1063 | * nv_get_stats: dev->get_stats function | |
1064 | * Get latest stats value from the nic. | |
1065 | * Called with read_lock(&dev_base_lock) held for read - | |
1066 | * only synchronized against unregister_netdevice. | |
1067 | */ | |
1068 | static struct net_device_stats *nv_get_stats(struct net_device *dev) | |
1069 | { | |
ac9c1897 | 1070 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 LT |
1071 | |
1072 | /* It seems that the nic always generates interrupts and doesn't | |
1073 | * accumulate errors internally. Thus the current values in np->stats | |
1074 | * are already up to date. | |
1075 | */ | |
1076 | return &np->stats; | |
1077 | } | |
1078 | ||
1079 | /* | |
1080 | * nv_alloc_rx: fill rx ring entries. | |
1081 | * Return 1 if the allocations for the skbs failed and the | |
1082 | * rx engine is without Available descriptors | |
1083 | */ | |
1084 | static int nv_alloc_rx(struct net_device *dev) | |
1085 | { | |
ac9c1897 | 1086 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 LT |
1087 | unsigned int refill_rx = np->refill_rx; |
1088 | int nr; | |
1089 | ||
1090 | while (np->cur_rx != refill_rx) { | |
1091 | struct sk_buff *skb; | |
1092 | ||
eafa59f6 | 1093 | nr = refill_rx % np->rx_ring_size; |
1da177e4 LT |
1094 | if (np->rx_skbuff[nr] == NULL) { |
1095 | ||
d81c0983 | 1096 | skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD); |
1da177e4 LT |
1097 | if (!skb) |
1098 | break; | |
1099 | ||
1100 | skb->dev = dev; | |
1101 | np->rx_skbuff[nr] = skb; | |
1102 | } else { | |
1103 | skb = np->rx_skbuff[nr]; | |
1104 | } | |
1836098f MS |
1105 | np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data, |
1106 | skb->end-skb->data, PCI_DMA_FROMDEVICE); | |
ee73362c MS |
1107 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
1108 | np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]); | |
1109 | wmb(); | |
1110 | np->rx_ring.orig[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL); | |
1111 | } else { | |
1112 | np->rx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->rx_dma[nr]) >> 32; | |
1113 | np->rx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF; | |
1114 | wmb(); | |
1115 | np->rx_ring.ex[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL); | |
1116 | } | |
1da177e4 LT |
1117 | dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n", |
1118 | dev->name, refill_rx); | |
1119 | refill_rx++; | |
1120 | } | |
1121 | np->refill_rx = refill_rx; | |
eafa59f6 | 1122 | if (np->cur_rx - refill_rx == np->rx_ring_size) |
1da177e4 LT |
1123 | return 1; |
1124 | return 0; | |
1125 | } | |
1126 | ||
1127 | static void nv_do_rx_refill(unsigned long data) | |
1128 | { | |
1129 | struct net_device *dev = (struct net_device *) data; | |
ac9c1897 | 1130 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 | 1131 | |
84b3932b AA |
1132 | if (!using_multi_irqs(dev)) { |
1133 | if (np->msi_flags & NV_MSI_X_ENABLED) | |
1134 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); | |
1135 | else | |
1136 | disable_irq(dev->irq); | |
d33a73c8 AA |
1137 | } else { |
1138 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); | |
1139 | } | |
1da177e4 | 1140 | if (nv_alloc_rx(dev)) { |
84b3932b | 1141 | spin_lock_irq(&np->lock); |
1da177e4 LT |
1142 | if (!np->in_shutdown) |
1143 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | |
84b3932b | 1144 | spin_unlock_irq(&np->lock); |
1da177e4 | 1145 | } |
84b3932b AA |
1146 | if (!using_multi_irqs(dev)) { |
1147 | if (np->msi_flags & NV_MSI_X_ENABLED) | |
1148 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); | |
1149 | else | |
1150 | enable_irq(dev->irq); | |
d33a73c8 AA |
1151 | } else { |
1152 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); | |
1153 | } | |
1da177e4 LT |
1154 | } |
1155 | ||
f3b197ac | 1156 | static void nv_init_rx(struct net_device *dev) |
1da177e4 | 1157 | { |
ac9c1897 | 1158 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 LT |
1159 | int i; |
1160 | ||
eafa59f6 | 1161 | np->cur_rx = np->rx_ring_size; |
1da177e4 | 1162 | np->refill_rx = 0; |
eafa59f6 | 1163 | for (i = 0; i < np->rx_ring_size; i++) |
ee73362c MS |
1164 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) |
1165 | np->rx_ring.orig[i].FlagLen = 0; | |
1166 | else | |
1167 | np->rx_ring.ex[i].FlagLen = 0; | |
d81c0983 MS |
1168 | } |
1169 | ||
1170 | static void nv_init_tx(struct net_device *dev) | |
1171 | { | |
ac9c1897 | 1172 | struct fe_priv *np = netdev_priv(dev); |
d81c0983 MS |
1173 | int i; |
1174 | ||
1175 | np->next_tx = np->nic_tx = 0; | |
eafa59f6 | 1176 | for (i = 0; i < np->tx_ring_size; i++) { |
ee73362c MS |
1177 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) |
1178 | np->tx_ring.orig[i].FlagLen = 0; | |
1179 | else | |
1180 | np->tx_ring.ex[i].FlagLen = 0; | |
ac9c1897 | 1181 | np->tx_skbuff[i] = NULL; |
fa45459e | 1182 | np->tx_dma[i] = 0; |
ac9c1897 | 1183 | } |
d81c0983 MS |
1184 | } |
1185 | ||
1186 | static int nv_init_ring(struct net_device *dev) | |
1187 | { | |
1188 | nv_init_tx(dev); | |
1189 | nv_init_rx(dev); | |
1da177e4 LT |
1190 | return nv_alloc_rx(dev); |
1191 | } | |
1192 | ||
fa45459e | 1193 | static int nv_release_txskb(struct net_device *dev, unsigned int skbnr) |
ac9c1897 AA |
1194 | { |
1195 | struct fe_priv *np = netdev_priv(dev); | |
fa45459e AA |
1196 | |
1197 | dprintk(KERN_INFO "%s: nv_release_txskb for skbnr %d\n", | |
1198 | dev->name, skbnr); | |
1199 | ||
1200 | if (np->tx_dma[skbnr]) { | |
1201 | pci_unmap_page(np->pci_dev, np->tx_dma[skbnr], | |
1202 | np->tx_dma_len[skbnr], | |
1203 | PCI_DMA_TODEVICE); | |
1204 | np->tx_dma[skbnr] = 0; | |
1205 | } | |
1206 | ||
1207 | if (np->tx_skbuff[skbnr]) { | |
d33a73c8 | 1208 | dev_kfree_skb_any(np->tx_skbuff[skbnr]); |
fa45459e AA |
1209 | np->tx_skbuff[skbnr] = NULL; |
1210 | return 1; | |
1211 | } else { | |
1212 | return 0; | |
ac9c1897 | 1213 | } |
ac9c1897 AA |
1214 | } |
1215 | ||
1da177e4 LT |
1216 | static void nv_drain_tx(struct net_device *dev) |
1217 | { | |
ac9c1897 AA |
1218 | struct fe_priv *np = netdev_priv(dev); |
1219 | unsigned int i; | |
f3b197ac | 1220 | |
eafa59f6 | 1221 | for (i = 0; i < np->tx_ring_size; i++) { |
ee73362c MS |
1222 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) |
1223 | np->tx_ring.orig[i].FlagLen = 0; | |
1224 | else | |
1225 | np->tx_ring.ex[i].FlagLen = 0; | |
fa45459e | 1226 | if (nv_release_txskb(dev, i)) |
1da177e4 | 1227 | np->stats.tx_dropped++; |
1da177e4 LT |
1228 | } |
1229 | } | |
1230 | ||
1231 | static void nv_drain_rx(struct net_device *dev) | |
1232 | { | |
ac9c1897 | 1233 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 | 1234 | int i; |
eafa59f6 | 1235 | for (i = 0; i < np->rx_ring_size; i++) { |
ee73362c MS |
1236 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) |
1237 | np->rx_ring.orig[i].FlagLen = 0; | |
1238 | else | |
1239 | np->rx_ring.ex[i].FlagLen = 0; | |
1da177e4 LT |
1240 | wmb(); |
1241 | if (np->rx_skbuff[i]) { | |
1242 | pci_unmap_single(np->pci_dev, np->rx_dma[i], | |
1836098f | 1243 | np->rx_skbuff[i]->end-np->rx_skbuff[i]->data, |
1da177e4 LT |
1244 | PCI_DMA_FROMDEVICE); |
1245 | dev_kfree_skb(np->rx_skbuff[i]); | |
1246 | np->rx_skbuff[i] = NULL; | |
1247 | } | |
1248 | } | |
1249 | } | |
1250 | ||
1251 | static void drain_ring(struct net_device *dev) | |
1252 | { | |
1253 | nv_drain_tx(dev); | |
1254 | nv_drain_rx(dev); | |
1255 | } | |
1256 | ||
1257 | /* | |
1258 | * nv_start_xmit: dev->hard_start_xmit function | |
1259 | * Called with dev->xmit_lock held. | |
1260 | */ | |
1261 | static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) | |
1262 | { | |
ac9c1897 | 1263 | struct fe_priv *np = netdev_priv(dev); |
fa45459e | 1264 | u32 tx_flags = 0; |
ac9c1897 AA |
1265 | u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); |
1266 | unsigned int fragments = skb_shinfo(skb)->nr_frags; | |
eafa59f6 AA |
1267 | unsigned int nr = (np->next_tx - 1) % np->tx_ring_size; |
1268 | unsigned int start_nr = np->next_tx % np->tx_ring_size; | |
ac9c1897 | 1269 | unsigned int i; |
fa45459e AA |
1270 | u32 offset = 0; |
1271 | u32 bcnt; | |
1272 | u32 size = skb->len-skb->data_len; | |
1273 | u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); | |
ee407b02 | 1274 | u32 tx_flags_vlan = 0; |
fa45459e AA |
1275 | |
1276 | /* add fragments to entries count */ | |
1277 | for (i = 0; i < fragments; i++) { | |
1278 | entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) + | |
1279 | ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); | |
1280 | } | |
ac9c1897 AA |
1281 | |
1282 | spin_lock_irq(&np->lock); | |
1283 | ||
eafa59f6 | 1284 | if ((np->next_tx - np->nic_tx + entries - 1) > np->tx_limit_stop) { |
ac9c1897 AA |
1285 | spin_unlock_irq(&np->lock); |
1286 | netif_stop_queue(dev); | |
1287 | return NETDEV_TX_BUSY; | |
1288 | } | |
1da177e4 | 1289 | |
fa45459e AA |
1290 | /* setup the header buffer */ |
1291 | do { | |
1292 | bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; | |
eafa59f6 | 1293 | nr = (nr + 1) % np->tx_ring_size; |
fa45459e AA |
1294 | |
1295 | np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data + offset, bcnt, | |
1296 | PCI_DMA_TODEVICE); | |
1297 | np->tx_dma_len[nr] = bcnt; | |
1298 | ||
1299 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | |
1300 | np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]); | |
1301 | np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags); | |
1302 | } else { | |
1303 | np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32; | |
1304 | np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF; | |
1305 | np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags); | |
1306 | } | |
1307 | tx_flags = np->tx_flags; | |
1308 | offset += bcnt; | |
1309 | size -= bcnt; | |
1310 | } while(size); | |
1311 | ||
1312 | /* setup the fragments */ | |
1313 | for (i = 0; i < fragments; i++) { | |
1314 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | |
1315 | u32 size = frag->size; | |
1316 | offset = 0; | |
1317 | ||
1318 | do { | |
1319 | bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; | |
eafa59f6 | 1320 | nr = (nr + 1) % np->tx_ring_size; |
fa45459e AA |
1321 | |
1322 | np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, | |
1323 | PCI_DMA_TODEVICE); | |
1324 | np->tx_dma_len[nr] = bcnt; | |
1da177e4 | 1325 | |
ac9c1897 AA |
1326 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
1327 | np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]); | |
fa45459e | 1328 | np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags); |
ac9c1897 AA |
1329 | } else { |
1330 | np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32; | |
1331 | np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF; | |
fa45459e | 1332 | np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags); |
ac9c1897 | 1333 | } |
fa45459e AA |
1334 | offset += bcnt; |
1335 | size -= bcnt; | |
1336 | } while (size); | |
1337 | } | |
ac9c1897 | 1338 | |
fa45459e AA |
1339 | /* set last fragment flag */ |
1340 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | |
1341 | np->tx_ring.orig[nr].FlagLen |= cpu_to_le32(tx_flags_extra); | |
1342 | } else { | |
1343 | np->tx_ring.ex[nr].FlagLen |= cpu_to_le32(tx_flags_extra); | |
ac9c1897 AA |
1344 | } |
1345 | ||
fa45459e AA |
1346 | np->tx_skbuff[nr] = skb; |
1347 | ||
ac9c1897 AA |
1348 | #ifdef NETIF_F_TSO |
1349 | if (skb_shinfo(skb)->tso_size) | |
fa45459e | 1350 | tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT); |
ac9c1897 AA |
1351 | else |
1352 | #endif | |
fa45459e | 1353 | tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0); |
ac9c1897 | 1354 | |
ee407b02 AA |
1355 | /* vlan tag */ |
1356 | if (np->vlangrp && vlan_tx_tag_present(skb)) { | |
1357 | tx_flags_vlan = NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb); | |
1358 | } | |
1359 | ||
fa45459e | 1360 | /* set tx flags */ |
ac9c1897 | 1361 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
fa45459e | 1362 | np->tx_ring.orig[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra); |
ac9c1897 | 1363 | } else { |
ee407b02 | 1364 | np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan); |
fa45459e | 1365 | np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra); |
f3b197ac | 1366 | } |
1da177e4 | 1367 | |
fa45459e AA |
1368 | dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n", |
1369 | dev->name, np->next_tx, entries, tx_flags_extra); | |
1da177e4 LT |
1370 | { |
1371 | int j; | |
1372 | for (j=0; j<64; j++) { | |
1373 | if ((j%16) == 0) | |
1374 | dprintk("\n%03x:", j); | |
1375 | dprintk(" %02x", ((unsigned char*)skb->data)[j]); | |
1376 | } | |
1377 | dprintk("\n"); | |
1378 | } | |
1379 | ||
fa45459e | 1380 | np->next_tx += entries; |
1da177e4 LT |
1381 | |
1382 | dev->trans_start = jiffies; | |
1da177e4 | 1383 | spin_unlock_irq(&np->lock); |
8a4ae7f2 | 1384 | writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); |
1da177e4 | 1385 | pci_push(get_hwbase(dev)); |
ac9c1897 | 1386 | return NETDEV_TX_OK; |
1da177e4 LT |
1387 | } |
1388 | ||
1389 | /* | |
1390 | * nv_tx_done: check for completed packets, release the skbs. | |
1391 | * | |
1392 | * Caller must own np->lock. | |
1393 | */ | |
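/* np->next_tx and np->nic_tx are free-running counters; the ring slot is the
 * counter modulo tx_ring_size, and a descriptor still carrying NV_TX_VALID is
 * still owned by the hardware. */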
1394 | static void nv_tx_done(struct net_device *dev) | |
1395 | { | |
ac9c1897 | 1396 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 | 1397 | u32 Flags; |
ac9c1897 AA |
1398 | unsigned int i; |
1399 | struct sk_buff *skb; | |
1da177e4 LT |
1400 | |
1401 | while (np->nic_tx != np->next_tx) { | |
eafa59f6 | 1402 | i = np->nic_tx % np->tx_ring_size; |
1da177e4 | 1403 | |
ee73362c MS |
1404 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) |
1405 | Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen); | |
1406 | else | |
1407 | Flags = le32_to_cpu(np->tx_ring.ex[i].FlagLen); | |
1da177e4 LT |
1408 | |
1409 | dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n", | |
1410 | dev->name, np->nic_tx, Flags); | |
1411 | if (Flags & NV_TX_VALID) | |
1412 | break; | |
1413 | if (np->desc_ver == DESC_VER_1) { | |
ac9c1897 AA |
1414 | if (Flags & NV_TX_LASTPACKET) { |
1415 | skb = np->tx_skbuff[i]; | |
1416 | if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION| | |
1417 | NV_TX_UNDERFLOW|NV_TX_ERROR)) { | |
1418 | if (Flags & NV_TX_UNDERFLOW) | |
1419 | np->stats.tx_fifo_errors++; | |
1420 | if (Flags & NV_TX_CARRIERLOST) | |
1421 | np->stats.tx_carrier_errors++; | |
1422 | np->stats.tx_errors++; | |
1423 | } else { | |
1424 | np->stats.tx_packets++; | |
1425 | np->stats.tx_bytes += skb->len; | |
1426 | } | |
1da177e4 LT |
1427 | } |
1428 | } else { | |
ac9c1897 AA |
1429 | if (Flags & NV_TX2_LASTPACKET) { |
1430 | skb = np->tx_skbuff[i]; | |
1431 | if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION| | |
1432 | NV_TX2_UNDERFLOW|NV_TX2_ERROR)) { | |
1433 | if (Flags & NV_TX2_UNDERFLOW) | |
1434 | np->stats.tx_fifo_errors++; | |
1435 | if (Flags & NV_TX2_CARRIERLOST) | |
1436 | np->stats.tx_carrier_errors++; | |
1437 | np->stats.tx_errors++; | |
1438 | } else { | |
1439 | np->stats.tx_packets++; | |
1440 | np->stats.tx_bytes += skb->len; | |
f3b197ac | 1441 | } |
1da177e4 LT |
1442 | } |
1443 | } | |
fa45459e | 1444 | nv_release_txskb(dev, i); |
1da177e4 LT |
1445 | np->nic_tx++; |
1446 | } | |
eafa59f6 | 1447 | if (np->next_tx - np->nic_tx < np->tx_limit_start) |
1da177e4 LT |
1448 | netif_wake_queue(dev); |
1449 | } | |
1450 | ||
1451 | /* | |
1452 | * nv_tx_timeout: dev->tx_timeout function | |
1453 | * Called with dev->xmit_lock held. | |
1454 | */ | |
1455 | static void nv_tx_timeout(struct net_device *dev) | |
1456 | { | |
ac9c1897 | 1457 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 | 1458 | u8 __iomem *base = get_hwbase(dev); |
d33a73c8 AA |
1459 | u32 status; |
1460 | ||
1461 | if (np->msi_flags & NV_MSI_X_ENABLED) | |
1462 | status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; | |
1463 | else | |
1464 | status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; | |
1da177e4 | 1465 | |
d33a73c8 | 1466 | printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status); |
1da177e4 | 1467 | |
c2dba06d MS |
1468 | { |
1469 | int i; | |
1470 | ||
1471 | printk(KERN_INFO "%s: Ring at %lx: next %d nic %d\n", | |
1472 | dev->name, (unsigned long)np->ring_addr, | |
1473 | np->next_tx, np->nic_tx); | |
1474 | printk(KERN_INFO "%s: Dumping tx registers\n", dev->name); | |
86a0f043 | 1475 | for (i=0;i<=np->register_size;i+= 32) { |
c2dba06d MS |
1476 | printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n", |
1477 | i, | |
1478 | readl(base + i + 0), readl(base + i + 4), | |
1479 | readl(base + i + 8), readl(base + i + 12), | |
1480 | readl(base + i + 16), readl(base + i + 20), | |
1481 | readl(base + i + 24), readl(base + i + 28)); | |
1482 | } | |
1483 | printk(KERN_INFO "%s: Dumping tx ring\n", dev->name); | |
eafa59f6 | 1484 | for (i=0;i<np->tx_ring_size;i+= 4) { |
ee73362c MS |
1485 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
1486 | printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n", | |
f3b197ac | 1487 | i, |
ee73362c MS |
1488 | le32_to_cpu(np->tx_ring.orig[i].PacketBuffer), |
1489 | le32_to_cpu(np->tx_ring.orig[i].FlagLen), | |
1490 | le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer), | |
1491 | le32_to_cpu(np->tx_ring.orig[i+1].FlagLen), | |
1492 | le32_to_cpu(np->tx_ring.orig[i+2].PacketBuffer), | |
1493 | le32_to_cpu(np->tx_ring.orig[i+2].FlagLen), | |
1494 | le32_to_cpu(np->tx_ring.orig[i+3].PacketBuffer), | |
1495 | le32_to_cpu(np->tx_ring.orig[i+3].FlagLen)); | |
1496 | } else { | |
1497 | printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n", | |
f3b197ac | 1498 | i, |
ee73362c MS |
1499 | le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh), |
1500 | le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow), | |
1501 | le32_to_cpu(np->tx_ring.ex[i].FlagLen), | |
1502 | le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferHigh), | |
1503 | le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferLow), | |
1504 | le32_to_cpu(np->tx_ring.ex[i+1].FlagLen), | |
1505 | le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferHigh), | |
1506 | le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferLow), | |
1507 | le32_to_cpu(np->tx_ring.ex[i+2].FlagLen), | |
1508 | le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferHigh), | |
1509 | le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferLow), | |
1510 | le32_to_cpu(np->tx_ring.ex[i+3].FlagLen)); | |
1511 | } | |
c2dba06d MS |
1512 | } |
1513 | } | |
1514 | ||
1da177e4 LT |
1515 | spin_lock_irq(&np->lock); |
1516 | ||
1517 | /* 1) stop tx engine */ | |
1518 | nv_stop_tx(dev); | |
1519 | ||
1520 | /* 2) check that the packets were not sent already: */ | |
1521 | nv_tx_done(dev); | |
1522 | ||
1523 | /* 3) if there are dead entries: clear everything */ | |
1524 | if (np->next_tx != np->nic_tx) { | |
1525 | printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name); | |
1526 | nv_drain_tx(dev); | |
1527 | np->next_tx = np->nic_tx = 0; | |
0832b25a | 1528 | setup_hw_rings(dev, NV_SETUP_TX_RING); |
1da177e4 LT |
1529 | netif_wake_queue(dev); |
1530 | } | |
1531 | ||
1532 | /* 4) restart tx engine */ | |
1533 | nv_start_tx(dev); | |
1534 | spin_unlock_irq(&np->lock); | |
1535 | } | |
1536 | ||
22c6d143 MS |
1537 | /* |
1538 | * Called when the nic notices a mismatch between the actual data len on the | |
1539 | * wire and the len indicated in the 802 header | |
1540 | */ | |
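/* Illustrative example (not from the original source): a 64-byte frame whose
 * 802.3 length field claims 46 payload bytes gives protolen = 46 + ETH_HLEN
 * = 60; since datalen (64) >= protolen, the padding is trimmed and 60 is
 * returned. */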
1541 | static int nv_getlen(struct net_device *dev, void *packet, int datalen) | |
1542 | { | |
1543 | int hdrlen; /* length of the 802 header */ | |
1544 | int protolen; /* length as stored in the proto field */ | |
1545 | ||
1546 | /* 1) calculate len according to header */ | |
1547 | if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == __constant_htons(ETH_P_8021Q)) { | |
1548 | protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto ); | |
1549 | hdrlen = VLAN_HLEN; | |
1550 | } else { | |
1551 | protolen = ntohs( ((struct ethhdr *)packet)->h_proto); | |
1552 | hdrlen = ETH_HLEN; | |
1553 | } | |
1554 | dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n", | |
1555 | dev->name, datalen, protolen, hdrlen); | |
1556 | if (protolen > ETH_DATA_LEN) | |
1557 | return datalen; /* Value in proto field not a len, no checks possible */ | |
1558 | ||
1559 | protolen += hdrlen; | |
1560 | /* consistency checks: */ | |
1561 | if (datalen > ETH_ZLEN) { | |
1562 | if (datalen >= protolen) { | |
1563 | /* more data on wire than in 802 header, trim off |
1564 | * additional data. | |
1565 | */ | |
1566 | dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n", | |
1567 | dev->name, protolen); | |
1568 | return protolen; | |
1569 | } else { | |
1570 | /* less data on wire than mentioned in header. | |
1571 | * Discard the packet. | |
1572 | */ | |
1573 | dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n", | |
1574 | dev->name); | |
1575 | return -1; | |
1576 | } | |
1577 | } else { | |
1578 | /* short packet. Accept only if 802 values are also short */ | |
1579 | if (protolen > ETH_ZLEN) { | |
1580 | dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n", | |
1581 | dev->name); | |
1582 | return -1; | |
1583 | } | |
1584 | dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n", | |
1585 | dev->name, datalen); | |
1586 | return datalen; | |
1587 | } | |
1588 | } | |
1589 | ||
1da177e4 LT |
1590 | static void nv_rx_process(struct net_device *dev) |
1591 | { | |
ac9c1897 | 1592 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 | 1593 | u32 Flags; |
ee407b02 AA |
1594 | u32 vlanflags = 0; |
1595 | ||
1da177e4 LT |
1596 | for (;;) { |
1597 | struct sk_buff *skb; | |
1598 | int len; | |
1599 | int i; | |
eafa59f6 | 1600 | if (np->cur_rx - np->refill_rx >= np->rx_ring_size) |
1da177e4 LT |
1601 | break; /* we scanned the whole ring - do not continue */ |
1602 | ||
eafa59f6 | 1603 | i = np->cur_rx % np->rx_ring_size; |
ee73362c MS |
1604 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
1605 | Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen); | |
1606 | len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver); | |
1607 | } else { | |
1608 | Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen); | |
1609 | len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver); | |
ee407b02 | 1610 | vlanflags = le32_to_cpu(np->rx_ring.ex[i].PacketBufferLow); |
ee73362c | 1611 | } |
1da177e4 LT |
1612 | |
1613 | dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n", | |
1614 | dev->name, np->cur_rx, Flags); | |
1615 | ||
1616 | if (Flags & NV_RX_AVAIL) | |
1617 | break; /* still owned by hardware, */ | |
1618 | ||
1619 | /* | |
1620 | * the packet is for us - immediately tear down the pci mapping. | |
1621 | * TODO: check if a prefetch of the first cacheline improves | |
1622 | * the performance. | |
1623 | */ | |
1624 | pci_unmap_single(np->pci_dev, np->rx_dma[i], | |
1836098f | 1625 | np->rx_skbuff[i]->end-np->rx_skbuff[i]->data, |
1da177e4 LT |
1626 | PCI_DMA_FROMDEVICE); |
1627 | ||
1628 | { | |
1629 | int j; | |
1630 | dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",Flags); | |
1631 | for (j=0; j<64; j++) { | |
1632 | if ((j%16) == 0) | |
1633 | dprintk("\n%03x:", j); | |
1634 | dprintk(" %02x", ((unsigned char*)np->rx_skbuff[i]->data)[j]); | |
1635 | } | |
1636 | dprintk("\n"); | |
1637 | } | |
1638 | /* look at what we actually got: */ | |
1639 | if (np->desc_ver == DESC_VER_1) { | |
1640 | if (!(Flags & NV_RX_DESCRIPTORVALID)) | |
1641 | goto next_pkt; | |
1642 | ||
a971c324 AA |
1643 | if (Flags & NV_RX_ERROR) { |
1644 | if (Flags & NV_RX_MISSEDFRAME) { | |
1645 | np->stats.rx_missed_errors++; | |
1da177e4 LT |
1646 | np->stats.rx_errors++; |
1647 | goto next_pkt; | |
1648 | } | |
a971c324 AA |
1649 | if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) { |
1650 | np->stats.rx_errors++; | |
1651 | goto next_pkt; | |
1652 | } | |
1653 | if (Flags & NV_RX_CRCERR) { | |
1654 | np->stats.rx_crc_errors++; | |
1655 | np->stats.rx_errors++; | |
1656 | goto next_pkt; | |
1657 | } | |
1658 | if (Flags & NV_RX_OVERFLOW) { | |
1659 | np->stats.rx_over_errors++; | |
1660 | np->stats.rx_errors++; | |
1661 | goto next_pkt; | |
1662 | } | |
1663 | if (Flags & NV_RX_ERROR4) { | |
1664 | len = nv_getlen(dev, np->rx_skbuff[i]->data, len); | |
1665 | if (len < 0) { | |
1666 | np->stats.rx_errors++; | |
1667 | goto next_pkt; | |
1668 | } | |
1669 | } | |
1670 | /* framing errors are soft errors. */ | |
1671 | if (Flags & NV_RX_FRAMINGERR) { | |
1672 | if (Flags & NV_RX_SUBSTRACT1) { | |
1673 | len--; | |
1674 | } | |
22c6d143 MS |
1675 | } |
1676 | } | |
1da177e4 LT |
1677 | } else { |
1678 | if (!(Flags & NV_RX2_DESCRIPTORVALID)) | |
1679 | goto next_pkt; | |
1680 | ||
a971c324 AA |
1681 | if (Flags & NV_RX2_ERROR) { |
1682 | if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) { | |
1da177e4 LT |
1683 | np->stats.rx_errors++; |
1684 | goto next_pkt; | |
1685 | } | |
a971c324 AA |
1686 | if (Flags & NV_RX2_CRCERR) { |
1687 | np->stats.rx_crc_errors++; | |
1688 | np->stats.rx_errors++; | |
1689 | goto next_pkt; | |
1690 | } | |
1691 | if (Flags & NV_RX2_OVERFLOW) { | |
1692 | np->stats.rx_over_errors++; | |
1693 | np->stats.rx_errors++; | |
1694 | goto next_pkt; | |
1695 | } | |
1696 | if (Flags & NV_RX2_ERROR4) { | |
1697 | len = nv_getlen(dev, np->rx_skbuff[i]->data, len); | |
1698 | if (len < 0) { | |
1699 | np->stats.rx_errors++; | |
1700 | goto next_pkt; | |
1701 | } | |
1702 | } | |
1703 | /* framing errors are soft errors */ | |
1704 | if (Flags & NV_RX2_FRAMINGERR) { | |
1705 | if (Flags & NV_RX2_SUBSTRACT1) { | |
1706 | len--; | |
1707 | } | |
22c6d143 MS |
1708 | } |
1709 | } | |
1da177e4 LT |
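/* For the v2/v3 descriptor formats the hardware reports receive checksum
 * status in the flag bits; a recognised "checksum ok" pattern lets us mark
 * the skb CHECKSUM_UNNECESSARY so the stack skips software verification. */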
1710 | Flags &= NV_RX2_CHECKSUMMASK; |
1711 | if (Flags == NV_RX2_CHECKSUMOK1 || | |
1712 | Flags == NV_RX2_CHECKSUMOK2 || | |
1713 | Flags == NV_RX2_CHECKSUMOK3) { | |
1714 | dprintk(KERN_DEBUG "%s: hw checksum hit!\n", dev->name); |
1715 | np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY; | |
1716 | } else { | |
1717 | dprintk(KERN_DEBUG "%s: hw checksum miss!\n", dev->name); |
1718 | } | |
1719 | } | |
1720 | /* got a valid packet - forward it to the network core */ | |
1721 | skb = np->rx_skbuff[i]; | |
1722 | np->rx_skbuff[i] = NULL; | |
1723 | ||
1724 | skb_put(skb, len); | |
1725 | skb->protocol = eth_type_trans(skb, dev); | |
1726 | dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n", | |
1727 | dev->name, np->cur_rx, len, skb->protocol); | |
ee407b02 AA |
1728 | if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) { |
1729 | vlan_hwaccel_rx(skb, np->vlangrp, vlanflags & NV_RX3_VLAN_TAG_MASK); | |
1730 | } else { | |
1731 | netif_rx(skb); | |
1732 | } | |
1da177e4 LT |
1733 | dev->last_rx = jiffies; |
1734 | np->stats.rx_packets++; | |
1735 | np->stats.rx_bytes += len; | |
1736 | next_pkt: | |
1737 | np->cur_rx++; | |
1738 | } | |
1739 | } | |
1740 | ||
d81c0983 MS |
1741 | static void set_bufsize(struct net_device *dev) |
1742 | { | |
1743 | struct fe_priv *np = netdev_priv(dev); | |
1744 | ||
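/* NV_RX_HEADERS is extra headroom beyond the MTU (presumably for link-level
 * headers) so a maximum-sized frame fits into a single rx buffer. */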
1745 | if (dev->mtu <= ETH_DATA_LEN) | |
1746 | np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS; | |
1747 | else | |
1748 | np->rx_buf_sz = dev->mtu + NV_RX_HEADERS; | |
1749 | } | |
1750 | ||
1da177e4 LT |
1751 | /* |
1752 | * nv_change_mtu: dev->change_mtu function | |
1753 | * Called with dev_base_lock held for read. | |
1754 | */ | |
1755 | static int nv_change_mtu(struct net_device *dev, int new_mtu) | |
1756 | { | |
ac9c1897 | 1757 | struct fe_priv *np = netdev_priv(dev); |
d81c0983 MS |
1758 | int old_mtu; |
1759 | ||
1760 | if (new_mtu < 64 || new_mtu > np->pkt_limit) | |
1da177e4 | 1761 | return -EINVAL; |
d81c0983 MS |
1762 | |
1763 | old_mtu = dev->mtu; | |
1da177e4 | 1764 | dev->mtu = new_mtu; |
d81c0983 MS |
1765 | |
1766 | /* return early if the buffer sizes will not change */ | |
1767 | if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN) | |
1768 | return 0; | |
1769 | if (old_mtu == new_mtu) | |
1770 | return 0; | |
1771 | ||
1772 | /* synchronized against open : rtnl_lock() held by caller */ | |
1773 | if (netif_running(dev)) { | |
25097d4b | 1774 | u8 __iomem *base = get_hwbase(dev); |
d81c0983 MS |
1775 | /* |
1776 | * It seems that the nic preloads valid ring entries into an | |
1777 | * internal buffer. The procedure for flushing everything is | |
1778 | * guessed; there is probably a simpler approach. |
1779 | * Changing the MTU is a rare event, so it shouldn't matter. |
1780 | */ | |
84b3932b | 1781 | nv_disable_irq(dev); |
d81c0983 MS |
1782 | spin_lock_bh(&dev->xmit_lock); |
1783 | spin_lock(&np->lock); | |
1784 | /* stop engines */ | |
1785 | nv_stop_rx(dev); | |
1786 | nv_stop_tx(dev); | |
1787 | nv_txrx_reset(dev); | |
1788 | /* drain rx queue */ | |
1789 | nv_drain_rx(dev); | |
1790 | nv_drain_tx(dev); | |
1791 | /* reinit driver view of the rx queue */ | |
d81c0983 | 1792 | set_bufsize(dev); |
eafa59f6 | 1793 | if (nv_init_ring(dev)) { |
d81c0983 MS |
1794 | if (!np->in_shutdown) |
1795 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | |
1796 | } | |
1797 | /* reinit nic view of the rx queue */ | |
1798 | writel(np->rx_buf_sz, base + NvRegOffloadConfig); | |
0832b25a | 1799 | setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); |
eafa59f6 | 1800 | writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), |
d81c0983 MS |
1801 | base + NvRegRingSizes); |
1802 | pci_push(base); | |
8a4ae7f2 | 1803 | writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); |
d81c0983 MS |
1804 | pci_push(base); |
1805 | ||
1806 | /* restart rx and tx engines */ |
1807 | nv_start_rx(dev); | |
1808 | nv_start_tx(dev); | |
1809 | spin_unlock(&np->lock); | |
1810 | spin_unlock_bh(&dev->xmit_lock); | |
84b3932b | 1811 | nv_enable_irq(dev); |
d81c0983 | 1812 | } |
1da177e4 LT |
1813 | return 0; |
1814 | } | |
1815 | ||
72b31782 MS |
1816 | static void nv_copy_mac_to_hw(struct net_device *dev) |
1817 | { | |
25097d4b | 1818 | u8 __iomem *base = get_hwbase(dev); |
72b31782 MS |
1819 | u32 mac[2]; |
1820 | ||
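/* Pack the six station-address bytes little-endian into the two MAC
 * registers: dev_addr[0..3] into NvRegMacAddrA, dev_addr[4..5] into
 * NvRegMacAddrB. */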
1821 | mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) + | |
1822 | (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24); | |
1823 | mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8); | |
1824 | ||
1825 | writel(mac[0], base + NvRegMacAddrA); | |
1826 | writel(mac[1], base + NvRegMacAddrB); | |
1827 | } | |
1828 | ||
1829 | /* | |
1830 | * nv_set_mac_address: dev->set_mac_address function | |
1831 | * Called with rtnl_lock() held. | |
1832 | */ | |
1833 | static int nv_set_mac_address(struct net_device *dev, void *addr) | |
1834 | { | |
ac9c1897 | 1835 | struct fe_priv *np = netdev_priv(dev); |
72b31782 MS |
1836 | struct sockaddr *macaddr = (struct sockaddr*)addr; |
1837 | ||
1838 | if(!is_valid_ether_addr(macaddr->sa_data)) | |
1839 | return -EADDRNOTAVAIL; | |
1840 | ||
1841 | /* synchronized against open : rtnl_lock() held by caller */ | |
1842 | memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN); | |
1843 | ||
1844 | if (netif_running(dev)) { | |
1845 | spin_lock_bh(&dev->xmit_lock); | |
1846 | spin_lock_irq(&np->lock); | |
1847 | ||
1848 | /* stop rx engine */ | |
1849 | nv_stop_rx(dev); | |
1850 | ||
1851 | /* set mac address */ | |
1852 | nv_copy_mac_to_hw(dev); | |
1853 | ||
1854 | /* restart rx engine */ | |
1855 | nv_start_rx(dev); | |
1856 | spin_unlock_irq(&np->lock); | |
1857 | spin_unlock_bh(&dev->xmit_lock); | |
1858 | } else { | |
1859 | nv_copy_mac_to_hw(dev); | |
1860 | } | |
1861 | return 0; | |
1862 | } | |
1863 | ||
1da177e4 LT |
1864 | /* |
1865 | * nv_set_multicast: dev->set_multicast function | |
1866 | * Called with dev->xmit_lock held. | |
1867 | */ | |
1868 | static void nv_set_multicast(struct net_device *dev) | |
1869 | { | |
ac9c1897 | 1870 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 LT |
1871 | u8 __iomem *base = get_hwbase(dev); |
1872 | u32 addr[2]; | |
1873 | u32 mask[2]; | |
b6d0773f | 1874 | u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX; |
1da177e4 LT |
1875 | |
1876 | memset(addr, 0, sizeof(addr)); | |
1877 | memset(mask, 0, sizeof(mask)); | |
1878 | ||
1879 | if (dev->flags & IFF_PROMISC) { | |
1880 | printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name); | |
b6d0773f | 1881 | pff |= NVREG_PFF_PROMISC; |
1da177e4 | 1882 | } else { |
b6d0773f | 1883 | pff |= NVREG_PFF_MYADDR; |
1da177e4 LT |
1884 | |
1885 | if (dev->flags & IFF_ALLMULTI || dev->mc_list) { | |
1886 | u32 alwaysOff[2]; | |
1887 | u32 alwaysOn[2]; | |
1888 | ||
1889 | alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff; | |
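/* When not in ALLMULTI mode, the loop below derives the tightest single
 * address/mask filter covering the whole list: alwaysOn accumulates bits set
 * in every multicast address, alwaysOff bits clear in every address, so the
 * final mask only keeps bits whose value is identical across all entries. */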
1890 | if (dev->flags & IFF_ALLMULTI) { | |
1891 | alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0; | |
1892 | } else { | |
1893 | struct dev_mc_list *walk; | |
1894 | ||
1895 | walk = dev->mc_list; | |
1896 | while (walk != NULL) { | |
1897 | u32 a, b; | |
1898 | a = le32_to_cpu(*(u32 *) walk->dmi_addr); | |
1899 | b = le16_to_cpu(*(u16 *) (&walk->dmi_addr[4])); | |
1900 | alwaysOn[0] &= a; | |
1901 | alwaysOff[0] &= ~a; | |
1902 | alwaysOn[1] &= b; | |
1903 | alwaysOff[1] &= ~b; | |
1904 | walk = walk->next; | |
1905 | } | |
1906 | } | |
1907 | addr[0] = alwaysOn[0]; | |
1908 | addr[1] = alwaysOn[1]; | |
1909 | mask[0] = alwaysOn[0] | alwaysOff[0]; | |
1910 | mask[1] = alwaysOn[1] | alwaysOff[1]; | |
1911 | } | |
1912 | } | |
1913 | addr[0] |= NVREG_MCASTADDRA_FORCE; | |
1914 | pff |= NVREG_PFF_ALWAYS; | |
1915 | spin_lock_irq(&np->lock); | |
1916 | nv_stop_rx(dev); | |
1917 | writel(addr[0], base + NvRegMulticastAddrA); | |
1918 | writel(addr[1], base + NvRegMulticastAddrB); | |
1919 | writel(mask[0], base + NvRegMulticastMaskA); | |
1920 | writel(mask[1], base + NvRegMulticastMaskB); | |
1921 | writel(pff, base + NvRegPacketFilterFlags); | |
1922 | dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n", | |
1923 | dev->name); | |
1924 | nv_start_rx(dev); | |
1925 | spin_unlock_irq(&np->lock); | |
1926 | } | |
1927 | ||
b6d0773f AA |
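/* nv_update_pause: program the MAC for the requested flow-control mode.
 * RX pause is toggled through the PAUSE_RX bit of the packet filter flags,
 * TX pause through NvRegTxPauseFrame and the MISC1 pause bit. */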
1928 | void nv_update_pause(struct net_device *dev, u32 pause_flags) |
1929 | { | |
1930 | struct fe_priv *np = netdev_priv(dev); | |
1931 | u8 __iomem *base = get_hwbase(dev); | |
1932 | ||
1933 | np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE); | |
1934 | ||
1935 | if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) { | |
1936 | u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX; | |
1937 | if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) { | |
1938 | writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags); | |
1939 | np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; | |
1940 | } else { | |
1941 | writel(pff, base + NvRegPacketFilterFlags); | |
1942 | } | |
1943 | } | |
1944 | if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) { | |
1945 | u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX; | |
1946 | if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) { | |
1947 | writel(NVREG_TX_PAUSEFRAME_ENABLE, base + NvRegTxPauseFrame); | |
1948 | writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1); | |
1949 | np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; | |
1950 | } else { | |
1951 | writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); | |
1952 | writel(regmisc, base + NvRegMisc1); | |
1953 | } | |
1954 | } | |
1955 | } | |
1956 | ||
4ea7f299 AA |
1957 | /** |
1958 | * nv_update_linkspeed: Setup the MAC according to the link partner | |
1959 | * @dev: Network device to be configured | |
1960 | * | |
1961 | * The function queries the PHY and checks if there is a link partner. | |
1962 | * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is | |
1963 | * set to 10 MBit HD. | |
1964 | * | |
1965 | * The function returns 0 if there is no link partner and 1 if there is | |
1966 | * a good link partner. | |
1967 | */ | |
1da177e4 LT |
1968 | static int nv_update_linkspeed(struct net_device *dev) |
1969 | { | |
ac9c1897 | 1970 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 | 1971 | u8 __iomem *base = get_hwbase(dev); |
eb91f61b AA |
1972 | int adv = 0; |
1973 | int lpa = 0; | |
1974 | int adv_lpa, adv_pause, lpa_pause; | |
1da177e4 LT |
1975 | int newls = np->linkspeed; |
1976 | int newdup = np->duplex; | |
1977 | int mii_status; | |
1978 | int retval = 0; | |
b6d0773f | 1979 | u32 control_1000, status_1000, phyreg, pause_flags; |
1da177e4 LT |
1980 | |
1981 | /* BMSR_LSTATUS is latched, read it twice: | |
1982 | * we want the current value. | |
1983 | */ | |
1984 | mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); | |
1985 | mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); | |
1986 | ||
1987 | if (!(mii_status & BMSR_LSTATUS)) { | |
1988 | dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n", | |
1989 | dev->name); | |
1990 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; | |
1991 | newdup = 0; | |
1992 | retval = 0; | |
1993 | goto set_speed; | |
1994 | } | |
1995 | ||
1996 | if (np->autoneg == 0) { | |
1997 | dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n", | |
1998 | dev->name, np->fixed_mode); | |
1999 | if (np->fixed_mode & LPA_100FULL) { | |
2000 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; | |
2001 | newdup = 1; | |
2002 | } else if (np->fixed_mode & LPA_100HALF) { | |
2003 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; | |
2004 | newdup = 0; | |
2005 | } else if (np->fixed_mode & LPA_10FULL) { | |
2006 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; | |
2007 | newdup = 1; | |
2008 | } else { | |
2009 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; | |
2010 | newdup = 0; | |
2011 | } | |
2012 | retval = 1; | |
2013 | goto set_speed; | |
2014 | } | |
2015 | /* check auto negotiation is complete */ | |
2016 | if (!(mii_status & BMSR_ANEGCOMPLETE)) { | |
2017 | /* still in autonegotiation - configure nic for 10 MBit HD and wait. */ | |
2018 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; | |
2019 | newdup = 0; | |
2020 | retval = 0; | |
2021 | dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name); | |
2022 | goto set_speed; | |
2023 | } | |
2024 | ||
b6d0773f AA |
2025 | adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); |
2026 | lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ); | |
2027 | dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n", | |
2028 | dev->name, adv, lpa); | |
2029 | ||
1da177e4 LT |
2030 | retval = 1; |
2031 | if (np->gigabit == PHY_GIGABIT) { | |
eb91f61b AA |
2032 | control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); |
2033 | status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ); | |
1da177e4 LT |
2034 | |
2035 | if ((control_1000 & ADVERTISE_1000FULL) && | |
2036 | (status_1000 & LPA_1000FULL)) { | |
2037 | dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n", | |
2038 | dev->name); | |
2039 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000; | |
2040 | newdup = 1; | |
2041 | goto set_speed; | |
2042 | } | |
2043 | } | |
2044 | ||
1da177e4 | 2045 | /* FIXME: handle parallel detection properly */ |
eb91f61b AA |
2046 | adv_lpa = lpa & adv; |
2047 | if (adv_lpa & LPA_100FULL) { | |
1da177e4 LT |
2048 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; |
2049 | newdup = 1; | |
eb91f61b | 2050 | } else if (adv_lpa & LPA_100HALF) { |
1da177e4 LT |
2051 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; |
2052 | newdup = 0; | |
eb91f61b | 2053 | } else if (adv_lpa & LPA_10FULL) { |
1da177e4 LT |
2054 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; |
2055 | newdup = 1; | |
eb91f61b | 2056 | } else if (adv_lpa & LPA_10HALF) { |
1da177e4 LT |
2057 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; |
2058 | newdup = 0; | |
2059 | } else { | |
eb91f61b | 2060 | dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa); |
1da177e4 LT |
2061 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; |
2062 | newdup = 0; | |
2063 | } | |
2064 | ||
2065 | set_speed: | |
2066 | if (np->duplex == newdup && np->linkspeed == newls) | |
2067 | return retval; | |
2068 | ||
2069 | dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n", | |
2070 | dev->name, np->linkspeed, np->duplex, newls, newdup); | |
2071 | ||
2072 | np->duplex = newdup; | |
2073 | np->linkspeed = newls; | |
2074 | ||
2075 | if (np->gigabit == PHY_GIGABIT) { | |
2076 | phyreg = readl(base + NvRegRandomSeed); | |
2077 | phyreg &= ~(0x3FF00); | |
2078 | if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) | |
2079 | phyreg |= NVREG_RNDSEED_FORCE3; | |
2080 | else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100) | |
2081 | phyreg |= NVREG_RNDSEED_FORCE2; | |
2082 | else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) | |
2083 | phyreg |= NVREG_RNDSEED_FORCE; | |
2084 | writel(phyreg, base + NvRegRandomSeed); | |
2085 | } | |
2086 | ||
2087 | phyreg = readl(base + NvRegPhyInterface); | |
2088 | phyreg &= ~(PHY_HALF|PHY_100|PHY_1000); | |
2089 | if (np->duplex == 0) | |
2090 | phyreg |= PHY_HALF; | |
2091 | if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100) | |
2092 | phyreg |= PHY_100; | |
2093 | else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) | |
2094 | phyreg |= PHY_1000; | |
2095 | writel(phyreg, base + NvRegPhyInterface); | |
2096 | ||
2097 | writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD), | |
2098 | base + NvRegMisc1); | |
2099 | pci_push(base); | |
2100 | writel(np->linkspeed, base + NvRegLinkSpeed); | |
2101 | pci_push(base); | |
2102 | ||
b6d0773f AA |
2103 | pause_flags = 0; |
2104 | /* setup pause frame */ | |
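/* Resolve flow control from our advertisement (adv_pause) and the link
 * partner's abilities (lpa_pause); the switch below follows the usual
 * symmetric/asymmetric pause resolution rules (cf. IEEE 802.3 Annex 28B),
 * gated by what was requested in np->pause_flags. */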
eb91f61b | 2105 | if (np->duplex != 0) { |
b6d0773f AA |
2106 | if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) { |
2107 | adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM); | |
2108 | lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM); | |
2109 | ||
2110 | switch (adv_pause) { | |
2111 | case (ADVERTISE_PAUSE_CAP): | |
2112 | if (lpa_pause & LPA_PAUSE_CAP) { | |
2113 | pause_flags |= NV_PAUSEFRAME_RX_ENABLE; | |
2114 | if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) | |
2115 | pause_flags |= NV_PAUSEFRAME_TX_ENABLE; | |
2116 | } | |
2117 | break; | |
2118 | case (ADVERTISE_PAUSE_ASYM): | |
2119 | if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM)) | |
2120 | { | |
2121 | pause_flags |= NV_PAUSEFRAME_TX_ENABLE; | |
2122 | } | |
2123 | break; | |
2124 | case (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM): | |
2125 | if (lpa_pause & LPA_PAUSE_CAP) | |
2126 | { | |
2127 | pause_flags |= NV_PAUSEFRAME_RX_ENABLE; | |
2128 | if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) | |
2129 | pause_flags |= NV_PAUSEFRAME_TX_ENABLE; | |
2130 | } | |
2131 | if (lpa_pause == LPA_PAUSE_ASYM) | |
2132 | { | |
2133 | pause_flags |= NV_PAUSEFRAME_RX_ENABLE; | |
2134 | } | |
2135 | break; | |
f3b197ac | 2136 | } |
eb91f61b | 2137 | } else { |
b6d0773f | 2138 | pause_flags = np->pause_flags; |
eb91f61b AA |
2139 | } |
2140 | } | |
b6d0773f | 2141 | nv_update_pause(dev, pause_flags); |
eb91f61b | 2142 | |
1da177e4 LT |
2143 | return retval; |
2144 | } | |
2145 | ||
2146 | static void nv_linkchange(struct net_device *dev) | |
2147 | { | |
2148 | if (nv_update_linkspeed(dev)) { | |
4ea7f299 | 2149 | if (!netif_carrier_ok(dev)) { |
1da177e4 LT |
2150 | netif_carrier_on(dev); |
2151 | printk(KERN_INFO "%s: link up.\n", dev->name); | |
4ea7f299 | 2152 | nv_start_rx(dev); |
1da177e4 | 2153 | } |
1da177e4 LT |
2154 | } else { |
2155 | if (netif_carrier_ok(dev)) { | |
2156 | netif_carrier_off(dev); | |
2157 | printk(KERN_INFO "%s: link down.\n", dev->name); | |
2158 | nv_stop_rx(dev); | |
2159 | } | |
2160 | } | |
2161 | } | |
2162 | ||
2163 | static void nv_link_irq(struct net_device *dev) | |
2164 | { | |
2165 | u8 __iomem *base = get_hwbase(dev); | |
2166 | u32 miistat; | |
2167 | ||
2168 | miistat = readl(base + NvRegMIIStatus); | |
2169 | writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); | |
2170 | dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat); | |
2171 | ||
2172 | if (miistat & (NVREG_MIISTAT_LINKCHANGE)) | |
2173 | nv_linkchange(dev); | |
2174 | dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name); | |
2175 | } | |
2176 | ||
2177 | static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs) | |
2178 | { | |
2179 | struct net_device *dev = (struct net_device *) data; | |
ac9c1897 | 2180 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 LT |
2181 | u8 __iomem *base = get_hwbase(dev); |
2182 | u32 events; | |
2183 | int i; | |
2184 | ||
2185 | dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name); | |
2186 | ||
2187 | for (i=0; ; i++) { | |
d33a73c8 AA |
2188 | if (!(np->msi_flags & NV_MSI_X_ENABLED)) { |
2189 | events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; | |
2190 | writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); | |
2191 | } else { | |
2192 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; | |
2193 | writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); | |
2194 | } | |
1da177e4 LT |
2195 | pci_push(base); |
2196 | dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); | |
2197 | if (!(events & np->irqmask)) | |
2198 | break; | |
2199 | ||
a971c324 AA |
2200 | spin_lock(&np->lock); |
2201 | nv_tx_done(dev); | |
2202 | spin_unlock(&np->lock); | |
f3b197ac | 2203 | |
a971c324 AA |
2204 | nv_rx_process(dev); |
2205 | if (nv_alloc_rx(dev)) { | |
1da177e4 | 2206 | spin_lock(&np->lock); |
a971c324 AA |
2207 | if (!np->in_shutdown) |
2208 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | |
1da177e4 LT |
2209 | spin_unlock(&np->lock); |
2210 | } | |
f3b197ac | 2211 | |
1da177e4 LT |
2212 | if (events & NVREG_IRQ_LINK) { |
2213 | spin_lock(&np->lock); | |
2214 | nv_link_irq(dev); | |
2215 | spin_unlock(&np->lock); | |
2216 | } | |
2217 | if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { | |
2218 | spin_lock(&np->lock); | |
2219 | nv_linkchange(dev); | |
2220 | spin_unlock(&np->lock); | |
2221 | np->link_timeout = jiffies + LINK_TIMEOUT; | |
2222 | } | |
2223 | if (events & (NVREG_IRQ_TX_ERR)) { | |
2224 | dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", | |
2225 | dev->name, events); | |
2226 | } | |
2227 | if (events & (NVREG_IRQ_UNKNOWN)) { | |
2228 | printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", | |
2229 | dev->name, events); | |
2230 | } | |
2231 | if (i > max_interrupt_work) { | |
2232 | spin_lock(&np->lock); | |
2233 | /* disable interrupts on the nic */ | |
d33a73c8 AA |
2234 | if (!(np->msi_flags & NV_MSI_X_ENABLED)) |
2235 | writel(0, base + NvRegIrqMask); | |
2236 | else | |
2237 | writel(np->irqmask, base + NvRegIrqMask); | |
1da177e4 LT |
2238 | pci_push(base); |
2239 | ||
d33a73c8 AA |
2240 | if (!np->in_shutdown) { |
2241 | np->nic_poll_irq = np->irqmask; | |
1da177e4 | 2242 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); |
d33a73c8 | 2243 | } |
1da177e4 LT |
2244 | printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i); |
2245 | spin_unlock(&np->lock); | |
2246 | break; | |
2247 | } | |
2248 | ||
2249 | } | |
2250 | dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name); | |
2251 | ||
2252 | return IRQ_RETVAL(i); | |
2253 | } | |
2254 | ||
d33a73c8 AA |
2255 | static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs) |
2256 | { | |
2257 | struct net_device *dev = (struct net_device *) data; | |
2258 | struct fe_priv *np = netdev_priv(dev); | |
2259 | u8 __iomem *base = get_hwbase(dev); | |
2260 | u32 events; | |
2261 | int i; | |
2262 | ||
2263 | dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name); | |
2264 | ||
2265 | for (i=0; ; i++) { | |
2266 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; | |
2267 | writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); | |
2268 | pci_push(base); | |
2269 | dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events); | |
2270 | if (!(events & np->irqmask)) | |
2271 | break; | |
2272 | ||
84b3932b | 2273 | spin_lock_irq(&np->lock); |
d33a73c8 | 2274 | nv_tx_done(dev); |
84b3932b | 2275 | spin_unlock_irq(&np->lock); |
f3b197ac | 2276 | |
d33a73c8 AA |
2277 | if (events & (NVREG_IRQ_TX_ERR)) { |
2278 | dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", | |
2279 | dev->name, events); | |
2280 | } | |
2281 | if (i > max_interrupt_work) { | |
84b3932b | 2282 | spin_lock_irq(&np->lock); |
d33a73c8 AA |
2283 | /* disable interrupts on the nic */ |
2284 | writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask); | |
2285 | pci_push(base); | |
2286 | ||
2287 | if (!np->in_shutdown) { | |
2288 | np->nic_poll_irq |= NVREG_IRQ_TX_ALL; | |
2289 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); | |
2290 | } | |
2291 | printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i); | |
84b3932b | 2292 | spin_unlock_irq(&np->lock); |
d33a73c8 AA |
2293 | break; |
2294 | } | |
2295 | ||
2296 | } | |
2297 | dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name); | |
2298 | ||
2299 | return IRQ_RETVAL(i); | |
2300 | } | |
2301 | ||
2302 | static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs) | |
2303 | { | |
2304 | struct net_device *dev = (struct net_device *) data; | |
2305 | struct fe_priv *np = netdev_priv(dev); | |
2306 | u8 __iomem *base = get_hwbase(dev); | |
2307 | u32 events; | |
2308 | int i; | |
2309 | ||
2310 | dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name); | |
2311 | ||
2312 | for (i=0; ; i++) { | |
2313 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; | |
2314 | writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); | |
2315 | pci_push(base); | |
2316 | dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events); | |
2317 | if (!(events & np->irqmask)) | |
2318 | break; | |
f3b197ac | 2319 | |
d33a73c8 AA |
2320 | nv_rx_process(dev); |
2321 | if (nv_alloc_rx(dev)) { | |
84b3932b | 2322 | spin_lock_irq(&np->lock); |
d33a73c8 AA |
2323 | if (!np->in_shutdown) |
2324 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | |
84b3932b | 2325 | spin_unlock_irq(&np->lock); |
d33a73c8 | 2326 | } |
f3b197ac | 2327 | |
d33a73c8 | 2328 | if (i > max_interrupt_work) { |
84b3932b | 2329 | spin_lock_irq(&np->lock); |
d33a73c8 AA |
2330 | /* disable interrupts on the nic */ |
2331 | writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); | |
2332 | pci_push(base); | |
2333 | ||
2334 | if (!np->in_shutdown) { | |
2335 | np->nic_poll_irq |= NVREG_IRQ_RX_ALL; | |
2336 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); | |
2337 | } | |
2338 | printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i); | |
84b3932b | 2339 | spin_unlock_irq(&np->lock); |
d33a73c8 AA |
2340 | break; |
2341 | } | |
2342 | ||
2343 | } | |
2344 | dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name); | |
2345 | ||
2346 | return IRQ_RETVAL(i); | |
2347 | } | |
2348 | ||
2349 | static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs) | |
2350 | { | |
2351 | struct net_device *dev = (struct net_device *) data; | |
2352 | struct fe_priv *np = netdev_priv(dev); | |
2353 | u8 __iomem *base = get_hwbase(dev); | |
2354 | u32 events; | |
2355 | int i; | |
2356 | ||
2357 | dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name); | |
2358 | ||
2359 | for (i=0; ; i++) { | |
2360 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; | |
2361 | writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus); | |
2362 | pci_push(base); | |
2363 | dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); | |
2364 | if (!(events & np->irqmask)) | |
2365 | break; | |
f3b197ac | 2366 | |
d33a73c8 | 2367 | if (events & NVREG_IRQ_LINK) { |
84b3932b | 2368 | spin_lock_irq(&np->lock); |
d33a73c8 | 2369 | nv_link_irq(dev); |
84b3932b | 2370 | spin_unlock_irq(&np->lock); |
d33a73c8 AA |
2371 | } |
2372 | if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { | |
84b3932b | 2373 | spin_lock_irq(&np->lock); |
d33a73c8 | 2374 | nv_linkchange(dev); |
84b3932b | 2375 | spin_unlock_irq(&np->lock); |
d33a73c8 AA |
2376 | np->link_timeout = jiffies + LINK_TIMEOUT; |
2377 | } | |
2378 | if (events & (NVREG_IRQ_UNKNOWN)) { | |
2379 | printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", | |
2380 | dev->name, events); | |
2381 | } | |
2382 | if (i > max_interrupt_work) { | |
84b3932b | 2383 | spin_lock_irq(&np->lock); |
d33a73c8 AA |
2384 | /* disable interrupts on the nic */ |
2385 | writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); | |
2386 | pci_push(base); | |
2387 | ||
2388 | if (!np->in_shutdown) { | |
2389 | np->nic_poll_irq |= NVREG_IRQ_OTHER; | |
2390 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); | |
2391 | } | |
2392 | printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i); | |
84b3932b | 2393 | spin_unlock_irq(&np->lock); |
d33a73c8 AA |
2394 | break; |
2395 | } | |
2396 | ||
2397 | } | |
2398 | dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name); | |
2399 | ||
2400 | return IRQ_RETVAL(i); | |
2401 | } | |
2402 | ||
1da177e4 LT |
2403 | static void nv_do_nic_poll(unsigned long data) |
2404 | { | |
2405 | struct net_device *dev = (struct net_device *) data; | |
ac9c1897 | 2406 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 | 2407 | u8 __iomem *base = get_hwbase(dev); |
d33a73c8 | 2408 | u32 mask = 0; |
1da177e4 | 2409 | |
1da177e4 | 2410 | /* |
d33a73c8 | 2411 | * First disable irq(s) and then |
1da177e4 LT |
2412 | * reenable interrupts on the nic; we have to do this before calling |
2413 | * nv_nic_irq because that may decide to do otherwise | |
2414 | */ | |
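/* This runs from the nic_poll timer: the interrupt handlers arm it when they
 * hit max_interrupt_work, and the relevant handler(s) are re-run here with
 * the offending irq line(s) disabled. */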
d33a73c8 | 2415 | |
84b3932b AA |
2416 | if (!using_multi_irqs(dev)) { |
2417 | if (np->msi_flags & NV_MSI_X_ENABLED) | |
2418 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); | |
2419 | else | |
2420 | disable_irq(dev->irq); | |
d33a73c8 AA |
2421 | mask = np->irqmask; |
2422 | } else { | |
2423 | if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { | |
2424 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); | |
2425 | mask |= NVREG_IRQ_RX_ALL; | |
2426 | } | |
2427 | if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { | |
2428 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); | |
2429 | mask |= NVREG_IRQ_TX_ALL; | |
2430 | } | |
2431 | if (np->nic_poll_irq & NVREG_IRQ_OTHER) { | |
2432 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); | |
2433 | mask |= NVREG_IRQ_OTHER; | |
2434 | } | |
2435 | } | |
2436 | np->nic_poll_irq = 0; | |
2437 | ||
2438 | /* FIXME: Do we need synchronize_irq(dev->irq) here? */ | |
f3b197ac | 2439 | |
d33a73c8 | 2440 | writel(mask, base + NvRegIrqMask); |
1da177e4 | 2441 | pci_push(base); |
d33a73c8 | 2442 | |
84b3932b | 2443 | if (!using_multi_irqs(dev)) { |
d33a73c8 | 2444 | nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL); |
84b3932b AA |
2445 | if (np->msi_flags & NV_MSI_X_ENABLED) |
2446 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); | |
2447 | else | |
2448 | enable_irq(dev->irq); | |
d33a73c8 AA |
2449 | } else { |
2450 | if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { | |
2451 | nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL); | |
2452 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); | |
2453 | } | |
2454 | if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { | |
2455 | nv_nic_irq_tx((int) 0, (void *) data, (struct pt_regs *) NULL); | |
2456 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); | |
2457 | } | |
2458 | if (np->nic_poll_irq & NVREG_IRQ_OTHER) { | |
2459 | nv_nic_irq_other((int) 0, (void *) data, (struct pt_regs *) NULL); | |
2460 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); | |
2461 | } | |
2462 | } | |
1da177e4 LT |
2463 | } |
2464 | ||
2918c35d MS |
2465 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2466 | static void nv_poll_controller(struct net_device *dev) | |
2467 | { | |
2468 | nv_do_nic_poll((unsigned long) dev); | |
2469 | } | |
2470 | #endif | |
2471 | ||
1da177e4 LT |
2472 | static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) |
2473 | { | |
ac9c1897 | 2474 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 LT |
2475 | strcpy(info->driver, "forcedeth"); |
2476 | strcpy(info->version, FORCEDETH_VERSION); | |
2477 | strcpy(info->bus_info, pci_name(np->pci_dev)); | |
2478 | } | |
2479 | ||
2480 | static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) | |
2481 | { | |
ac9c1897 | 2482 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 LT |
2483 | wolinfo->supported = WAKE_MAGIC; |
2484 | ||
2485 | spin_lock_irq(&np->lock); | |
2486 | if (np->wolenabled) | |
2487 | wolinfo->wolopts = WAKE_MAGIC; | |
2488 | spin_unlock_irq(&np->lock); | |
2489 | } | |
2490 | ||
2491 | static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) | |
2492 | { | |
ac9c1897 | 2493 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 LT |
2494 | u8 __iomem *base = get_hwbase(dev); |
2495 | ||
2496 | spin_lock_irq(&np->lock); | |
2497 | if (wolinfo->wolopts == 0) { | |
2498 | writel(0, base + NvRegWakeUpFlags); | |
2499 | np->wolenabled = 0; | |
2500 | } | |
2501 | if (wolinfo->wolopts & WAKE_MAGIC) { | |
2502 | writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags); | |
2503 | np->wolenabled = 1; | |
2504 | } | |
2505 | spin_unlock_irq(&np->lock); | |
2506 | return 0; | |
2507 | } | |
2508 | ||
2509 | static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |
2510 | { | |
2511 | struct fe_priv *np = netdev_priv(dev); | |
2512 | int adv; | |
2513 | ||
2514 | spin_lock_irq(&np->lock); | |
2515 | ecmd->port = PORT_MII; | |
2516 | if (!netif_running(dev)) { | |
2517 | /* We do not track link speed / duplex setting if the | |
2518 | * interface is disabled. Force a link check */ | |
2519 | nv_update_linkspeed(dev); | |
2520 | } | |
2521 | switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) { | |
2522 | case NVREG_LINKSPEED_10: | |
2523 | ecmd->speed = SPEED_10; | |
2524 | break; | |
2525 | case NVREG_LINKSPEED_100: | |
2526 | ecmd->speed = SPEED_100; | |
2527 | break; | |
2528 | case NVREG_LINKSPEED_1000: | |
2529 | ecmd->speed = SPEED_1000; | |
2530 | break; | |
2531 | } | |
2532 | ecmd->duplex = DUPLEX_HALF; | |
2533 | if (np->duplex) | |
2534 | ecmd->duplex = DUPLEX_FULL; | |
2535 | ||
2536 | ecmd->autoneg = np->autoneg; | |
2537 | ||
2538 | ecmd->advertising = ADVERTISED_MII; | |
2539 | if (np->autoneg) { | |
2540 | ecmd->advertising |= ADVERTISED_Autoneg; | |
2541 | adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); | |
2542 | } else { | |
2543 | adv = np->fixed_mode; | |
2544 | } | |
2545 | if (adv & ADVERTISE_10HALF) | |
2546 | ecmd->advertising |= ADVERTISED_10baseT_Half; | |
2547 | if (adv & ADVERTISE_10FULL) | |
2548 | ecmd->advertising |= ADVERTISED_10baseT_Full; | |
2549 | if (adv & ADVERTISE_100HALF) | |
2550 | ecmd->advertising |= ADVERTISED_100baseT_Half; | |
2551 | if (adv & ADVERTISE_100FULL) | |
2552 | ecmd->advertising |= ADVERTISED_100baseT_Full; | |
2553 | if (np->autoneg && np->gigabit == PHY_GIGABIT) { | |
eb91f61b | 2554 | adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); |
1da177e4 LT |
2555 | if (adv & ADVERTISE_1000FULL) |
2556 | ecmd->advertising |= ADVERTISED_1000baseT_Full; | |
2557 | } | |
2558 | ||
2559 | ecmd->supported = (SUPPORTED_Autoneg | | |
2560 | SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | | |
2561 | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | | |
2562 | SUPPORTED_MII); | |
2563 | if (np->gigabit == PHY_GIGABIT) | |
2564 | ecmd->supported |= SUPPORTED_1000baseT_Full; | |
2565 | ||
2566 | ecmd->phy_address = np->phyaddr; | |
2567 | ecmd->transceiver = XCVR_EXTERNAL; | |
2568 | ||
2569 | /* ignore maxtxpkt, maxrxpkt for now */ | |
2570 | spin_unlock_irq(&np->lock); | |
2571 | return 0; | |
2572 | } | |
2573 | ||
2574 | static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |
2575 | { | |
2576 | struct fe_priv *np = netdev_priv(dev); | |
2577 | ||
2578 | if (ecmd->port != PORT_MII) | |
2579 | return -EINVAL; | |
2580 | if (ecmd->transceiver != XCVR_EXTERNAL) | |
2581 | return -EINVAL; | |
2582 | if (ecmd->phy_address != np->phyaddr) { | |
2583 | /* TODO: support switching between multiple phys. Should be | |
2584 | * trivial, but not enabled due to lack of test hardware. */ | |
2585 | return -EINVAL; | |
2586 | } | |
2587 | if (ecmd->autoneg == AUTONEG_ENABLE) { | |
2588 | u32 mask; | |
2589 | ||
2590 | mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | | |
2591 | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; | |
2592 | if (np->gigabit == PHY_GIGABIT) | |
2593 | mask |= ADVERTISED_1000baseT_Full; | |
2594 | ||
2595 | if ((ecmd->advertising & mask) == 0) | |
2596 | return -EINVAL; | |
2597 | ||
2598 | } else if (ecmd->autoneg == AUTONEG_DISABLE) { | |
2599 | /* Note: autonegotiation disable, speed 1000 intentionally | |
2600 | * forbidden - no one should need that. */ |
2601 | ||
2602 | if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100) | |
2603 | return -EINVAL; | |
2604 | if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) | |
2605 | return -EINVAL; | |
2606 | } else { | |
2607 | return -EINVAL; | |
2608 | } | |
2609 | ||
2610 | spin_lock_irq(&np->lock); | |
2611 | if (ecmd->autoneg == AUTONEG_ENABLE) { | |
2612 | int adv, bmcr; | |
2613 | ||
2614 | np->autoneg = 1; | |
2615 | ||
2616 | /* advertise only what has been requested */ | |
2617 | adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); | |
eb91f61b | 2618 | adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); |
1da177e4 LT |
2619 | if (ecmd->advertising & ADVERTISED_10baseT_Half) |
2620 | adv |= ADVERTISE_10HALF; | |
2621 | if (ecmd->advertising & ADVERTISED_10baseT_Full) | |
b6d0773f | 2622 | adv |= ADVERTISE_10FULL; |
1da177e4 LT |
2623 | if (ecmd->advertising & ADVERTISED_100baseT_Half) |
2624 | adv |= ADVERTISE_100HALF; | |
2625 | if (ecmd->advertising & ADVERTISED_100baseT_Full) | |
b6d0773f AA |
2626 | adv |= ADVERTISE_100FULL; |
2627 | if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */ |
2628 | adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; | |
2629 | if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) | |
2630 | adv |= ADVERTISE_PAUSE_ASYM; | |
1da177e4 LT |
2631 | mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); |
2632 | ||
2633 | if (np->gigabit == PHY_GIGABIT) { | |
eb91f61b | 2634 | adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); |
1da177e4 LT |
2635 | adv &= ~ADVERTISE_1000FULL; |
2636 | if (ecmd->advertising & ADVERTISED_1000baseT_Full) | |
2637 | adv |= ADVERTISE_1000FULL; | |
eb91f61b | 2638 | mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); |
1da177e4 LT |
2639 | } |
2640 | ||
2641 | bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); | |
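/* Restart autonegotiation: BMCR_ANENABLE keeps autoneg on and BMCR_ANRESTART
 * forces a new negotiation cycle with the advertisement just programmed. */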
2642 | bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); | |
2643 | mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); | |
2644 | ||
2645 | } else { | |
2646 | int adv, bmcr; | |
2647 | ||
2648 | np->autoneg = 0; | |
2649 | ||
2650 | adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); | |
eb91f61b | 2651 | adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); |
1da177e4 LT |
2652 | if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF) |
2653 | adv |= ADVERTISE_10HALF; | |
2654 | if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL) | |
b6d0773f | 2655 | adv |= ADVERTISE_10FULL; |
1da177e4 LT |
2656 | if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF) |
2657 | adv |= ADVERTISE_100HALF; | |
2658 | if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL) | |
b6d0773f AA |
2659 | adv |= ADVERTISE_100FULL; |
2660 | np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); | |
2661 | if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) { /* for rx we set both advertisements but disable tx pause */ |
2662 | adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; | |
2663 | np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; | |
2664 | } | |
2665 | if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) { | |
2666 | adv |= ADVERTISE_PAUSE_ASYM; | |
2667 | np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; | |
2668 | } | |
1da177e4 LT |
2669 | mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); |
2670 | np->fixed_mode = adv; | |
2671 | ||
2672 | if (np->gigabit == PHY_GIGABIT) { | |
eb91f61b | 2673 | adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); |
1da177e4 | 2674 | adv &= ~ADVERTISE_1000FULL; |
eb91f61b | 2675 | mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); |
1da177e4 LT |
2676 | } |
2677 | ||
2678 | bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); | |
2679 | bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_FULLDPLX); /* clear autoneg/speed/duplex before forcing the new mode */ |
2680 | if (adv & (ADVERTISE_10FULL|ADVERTISE_100FULL)) | |
2681 | bmcr |= BMCR_FULLDPLX; | |
2682 | if (adv & (ADVERTISE_100HALF|ADVERTISE_100FULL)) | |
2683 | bmcr |= BMCR_SPEED100; | |
2684 | mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); | |
2685 | ||
2686 | if (netif_running(dev)) { | |
2687 | /* Wait a bit and then reconfigure the nic. */ | |
2688 | udelay(10); | |
2689 | nv_linkchange(dev); | |
2690 | } | |
2691 | } | |
2692 | spin_unlock_irq(&np->lock); | |
2693 | ||
2694 | return 0; | |
2695 | } | |
2696 | ||
dc8216c1 | 2697 | #define FORCEDETH_REGS_VER 1 |
dc8216c1 MS |
2698 | |
2699 | static int nv_get_regs_len(struct net_device *dev) | |
2700 | { | |
86a0f043 AA |
2701 | struct fe_priv *np = netdev_priv(dev); |
2702 | return np->register_size; | |
dc8216c1 MS |
2703 | } |
2704 | ||
2705 | static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf) | |
2706 | { | |
ac9c1897 | 2707 | struct fe_priv *np = netdev_priv(dev); |
dc8216c1 MS |
2708 | u8 __iomem *base = get_hwbase(dev); |
2709 | u32 *rbuf = buf; | |
2710 | int i; | |
2711 | ||
2712 | regs->version = FORCEDETH_REGS_VER; | |
2713 | spin_lock_irq(&np->lock); | |
86a0f043 | 2714 | for (i = 0; i < np->register_size/sizeof(u32); i++) |
dc8216c1 MS |
2715 | rbuf[i] = readl(base + i*sizeof(u32)); |
2716 | spin_unlock_irq(&np->lock); | |
2717 | } | |
2718 | ||
2719 | static int nv_nway_reset(struct net_device *dev) | |
2720 | { | |
ac9c1897 | 2721 | struct fe_priv *np = netdev_priv(dev); |
dc8216c1 MS |
2722 | int ret; |
2723 | ||
2724 | spin_lock_irq(&np->lock); | |
2725 | if (np->autoneg) { | |
2726 | int bmcr; | |
2727 | ||
2728 | bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); | |
2729 | bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); | |
2730 | mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); | |
2731 | ||
2732 | ret = 0; | |
2733 | } else { | |
2734 | ret = -EINVAL; | |
2735 | } | |
2736 | spin_unlock_irq(&np->lock); | |
2737 | ||
2738 | return ret; | |
2739 | } | |
2740 | ||
0674d594 ZA |
2741 | static int nv_set_tso(struct net_device *dev, u32 value) |
2742 | { | |
2743 | struct fe_priv *np = netdev_priv(dev); | |
2744 | ||
2745 | if ((np->driver_data & DEV_HAS_CHECKSUM)) | |
2746 | return ethtool_op_set_tso(dev, value); | |
2747 | else | |
6a78814f | 2748 | return -EOPNOTSUPP; |
0674d594 | 2749 | } |
0674d594 | 2750 | |
eafa59f6 AA |
2751 | static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) |
2752 | { | |
2753 | struct fe_priv *np = netdev_priv(dev); | |
2754 | ||
2755 | ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; | |
2756 | ring->rx_mini_max_pending = 0; | |
2757 | ring->rx_jumbo_max_pending = 0; | |
2758 | ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; | |
2759 | ||
2760 | ring->rx_pending = np->rx_ring_size; | |
2761 | ring->rx_mini_pending = 0; | |
2762 | ring->rx_jumbo_pending = 0; | |
2763 | ring->tx_pending = np->tx_ring_size; | |
2764 | } | |
2765 | ||
2766 | static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) | |
2767 | { | |
2768 | struct fe_priv *np = netdev_priv(dev); | |
2769 | u8 __iomem *base = get_hwbase(dev); | |
2770 | u8 *rxtx_ring, *rx_skbuff, *tx_skbuff, *rx_dma, *tx_dma, *tx_dma_len; | |
2771 | dma_addr_t ring_addr; | |
2772 | ||
2773 | if (ring->rx_pending < RX_RING_MIN || | |
2774 | ring->tx_pending < TX_RING_MIN || | |
2775 | ring->rx_mini_pending != 0 || | |
2776 | ring->rx_jumbo_pending != 0 || | |
2777 | (np->desc_ver == DESC_VER_1 && | |
2778 | (ring->rx_pending > RING_MAX_DESC_VER_1 || | |
2779 | ring->tx_pending > RING_MAX_DESC_VER_1)) || | |
2780 | (np->desc_ver != DESC_VER_1 && | |
2781 | (ring->rx_pending > RING_MAX_DESC_VER_2_3 || | |
2782 | ring->tx_pending > RING_MAX_DESC_VER_2_3))) { | |
2783 | return -EINVAL; | |
2784 | } | |
2785 | ||
2786 | /* allocate new rings */ | |
2787 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | |
2788 | rxtx_ring = pci_alloc_consistent(np->pci_dev, | |
2789 | sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), | |
2790 | &ring_addr); | |
2791 | } else { | |
2792 | rxtx_ring = pci_alloc_consistent(np->pci_dev, | |
2793 | sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), | |
2794 | &ring_addr); | |
2795 | } | |
2796 | rx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->rx_pending, GFP_KERNEL); | |
2797 | rx_dma = kmalloc(sizeof(dma_addr_t) * ring->rx_pending, GFP_KERNEL); | |
2798 | tx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->tx_pending, GFP_KERNEL); | |
2799 | tx_dma = kmalloc(sizeof(dma_addr_t) * ring->tx_pending, GFP_KERNEL); | |
2800 | tx_dma_len = kmalloc(sizeof(unsigned int) * ring->tx_pending, GFP_KERNEL); | |
2801 | if (!rxtx_ring || !rx_skbuff || !rx_dma || !tx_skbuff || !tx_dma || !tx_dma_len) { | |
2802 | /* fall back to old rings */ | |
2803 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | |
2804 | if (rxtx_ring)
2805 | pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), | |
2806 | rxtx_ring, ring_addr); | |
2807 | } else { | |
2808 | if (rxtx_ring) | |
2809 | pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), | |
2810 | rxtx_ring, ring_addr); | |
2811 | } | |
2812 | if (rx_skbuff) | |
2813 | kfree(rx_skbuff); | |
2814 | if (rx_dma) | |
2815 | kfree(rx_dma); | |
2816 | if (tx_skbuff) | |
2817 | kfree(tx_skbuff); | |
2818 | if (tx_dma) | |
2819 | kfree(tx_dma); | |
2820 | if (tx_dma_len) | |
2821 | kfree(tx_dma_len); | |
2822 | goto exit; | |
2823 | } | |
2824 | ||
2825 | if (netif_running(dev)) { | |
2826 | nv_disable_irq(dev); | |
2827 | spin_lock_bh(&dev->xmit_lock); | |
2828 | spin_lock(&np->lock); | |
2829 | /* stop engines */ | |
2830 | nv_stop_rx(dev); | |
2831 | nv_stop_tx(dev); | |
2832 | nv_txrx_reset(dev); | |
2833 | /* drain queues */ | |
2834 | nv_drain_rx(dev); | |
2835 | nv_drain_tx(dev); | |
2836 | /* delete queues */ | |
2837 | free_rings(dev); | |
2838 | } | |
2839 | ||
2840 | /* set new values */ | |
2841 | np->rx_ring_size = ring->rx_pending; | |
2842 | np->tx_ring_size = ring->tx_pending; | |
2843 | np->tx_limit_stop = ring->tx_pending - TX_LIMIT_DIFFERENCE; | |
2844 | np->tx_limit_start = ring->tx_pending - TX_LIMIT_DIFFERENCE - 1; | |
2845 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | |
2846 | np->rx_ring.orig = (struct ring_desc*)rxtx_ring; | |
2847 | np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; | |
2848 | } else { | |
2849 | np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring; | |
2850 | np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; | |
2851 | } | |
2852 | np->rx_skbuff = (struct sk_buff**)rx_skbuff; | |
2853 | np->rx_dma = (dma_addr_t*)rx_dma; | |
2854 | np->tx_skbuff = (struct sk_buff**)tx_skbuff; | |
2855 | np->tx_dma = (dma_addr_t*)tx_dma; | |
2856 | np->tx_dma_len = (unsigned int*)tx_dma_len; | |
2857 | np->ring_addr = ring_addr; | |
2858 | ||
2859 | memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size); | |
2860 | memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size); | |
2861 | memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size); | |
2862 | memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size); | |
2863 | memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size); | |
2864 | ||
2865 | if (netif_running(dev)) { | |
2866 | /* reinit driver view of the queues */ | |
2867 | set_bufsize(dev); | |
2868 | if (nv_init_ring(dev)) { | |
2869 | if (!np->in_shutdown) | |
2870 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | |
2871 | } | |
2872 | ||
2873 | /* reinit nic view of the queues */ | |
2874 | writel(np->rx_buf_sz, base + NvRegOffloadConfig); | |
2875 | setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); | |
2876 | writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), | |
2877 | base + NvRegRingSizes); | |
2878 | pci_push(base); | |
2879 | writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); | |
2880 | pci_push(base); | |
2881 | ||
2882 | /* restart engines */ | |
2883 | nv_start_rx(dev); | |
2884 | nv_start_tx(dev); | |
2885 | spin_unlock(&np->lock); | |
2886 | spin_unlock_bh(&dev->xmit_lock); | |
2887 | nv_enable_irq(dev); | |
2888 | } | |
2889 | return 0; | |
2890 | exit: | |
2891 | return -ENOMEM; | |
2892 | } | |
2893 | ||
b6d0773f AA |
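/* Pause frame get/set hooks, typically invoked via "ethtool -a ethX" and
 * "ethtool -A ethX autoneg on|off rx on|off tx on|off". */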
2894 | static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) |
2895 | { | |
2896 | struct fe_priv *np = netdev_priv(dev); | |
2897 | ||
2898 | pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0; | |
2899 | pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0; | |
2900 | pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0; | |
2901 | } | |
2902 | ||
2903 | static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) | |
2904 | { | |
2905 | struct fe_priv *np = netdev_priv(dev); | |
2906 | int adv, bmcr; | |
2907 | ||
2908 | if ((!np->autoneg && np->duplex == 0) || | |
2909 | (np->autoneg && !pause->autoneg && np->duplex == 0)) { | |
2910 | printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n", | |
2911 | dev->name); | |
2912 | return -EINVAL; | |
2913 | } | |
2914 | if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) { | |
2915 | printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name); | |
2916 | return -EINVAL; | |
2917 | } | |
2918 | ||
2919 | netif_carrier_off(dev); | |
2920 | if (netif_running(dev)) { | |
2921 | nv_disable_irq(dev); | |
2922 | spin_lock_bh(&dev->xmit_lock); | |
2923 | spin_lock(&np->lock); | |
2924 | /* stop engines */ | |
2925 | nv_stop_rx(dev); | |
2926 | nv_stop_tx(dev); | |
2927 | spin_unlock(&np->lock); | |
2928 | spin_unlock_bh(&dev->xmit_lock); | |
2929 | } | |
2930 | ||
2931 | np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ); | |
2932 | if (pause->rx_pause) | |
2933 | np->pause_flags |= NV_PAUSEFRAME_RX_REQ; | |
2934 | if (pause->tx_pause) | |
2935 | np->pause_flags |= NV_PAUSEFRAME_TX_REQ; | |
2936 | ||
2937 | if (np->autoneg && pause->autoneg) { | |
2938 | np->pause_flags |= NV_PAUSEFRAME_AUTONEG; | |
2939 | ||
2940 | adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); | |
2941 | adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); | |
2942 | if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
2943 | adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; | |
2944 | if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) | |
2945 | adv |= ADVERTISE_PAUSE_ASYM; | |
2946 | mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); | |
2947 | ||
2948 | if (netif_running(dev)) | |
2949 | printk(KERN_INFO "%s: link down.\n", dev->name); | |
2950 | bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); | |
2951 | bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); | |
2952 | mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); | |
2953 | } else { | |
2954 | np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); | |
2955 | if (pause->rx_pause) | |
2956 | np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; | |
2957 | if (pause->tx_pause) | |
2958 | np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; | |
2959 | ||
2960 | if (!netif_running(dev)) | |
2961 | nv_update_linkspeed(dev); | |
2962 | else | |
2963 | nv_update_pause(dev, np->pause_flags); | |
2964 | } | |
2965 | ||
2966 | if (netif_running(dev)) { | |
2967 | nv_start_rx(dev); | |
2968 | nv_start_tx(dev); | |
2969 | nv_enable_irq(dev); | |
2970 | } | |
2971 | return 0; | |
2972 | } | |
2973 | ||
1da177e4 LT |
2974 | static struct ethtool_ops ops = { |
2975 | .get_drvinfo = nv_get_drvinfo, | |
2976 | .get_link = ethtool_op_get_link, | |
2977 | .get_wol = nv_get_wol, | |
2978 | .set_wol = nv_set_wol, | |
2979 | .get_settings = nv_get_settings, | |
2980 | .set_settings = nv_set_settings, | |
dc8216c1 MS |
2981 | .get_regs_len = nv_get_regs_len, |
2982 | .get_regs = nv_get_regs, | |
2983 | .nway_reset = nv_nway_reset, | |
c704b856 | 2984 | .get_perm_addr = ethtool_op_get_perm_addr, |
0674d594 | 2985 | .get_tso = ethtool_op_get_tso, |
6a78814f | 2986 | .set_tso = nv_set_tso, |
eafa59f6 AA |
2987 | .get_ringparam = nv_get_ringparam, |
2988 | .set_ringparam = nv_set_ringparam, | |
b6d0773f AA |
2989 | .get_pauseparam = nv_get_pauseparam, |
2990 | .set_pauseparam = nv_set_pauseparam, | |
1da177e4 LT |
2991 | }; |
2992 | ||
ee407b02 AA |
2993 | static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) |
2994 | { | |
2995 | struct fe_priv *np = get_nvpriv(dev); | |
2996 | ||
2997 | spin_lock_irq(&np->lock); | |
2998 | ||
2999 | /* save vlan group */ | |
3000 | np->vlangrp = grp; | |
3001 | ||
3002 | if (grp) { | |
3003 | /* enable vlan on MAC */ | |
3004 | np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS; | |
3005 | } else { | |
3006 | /* disable vlan on MAC */ | |
3007 | np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP; | |
3008 | np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS; | |
3009 | } | |
3010 | ||
3011 | writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); | |
3012 | ||
3013 | spin_unlock_irq(&np->lock); | |
3014 | }
3015 | ||
3016 | static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |
3017 | { | |
3018 | /* nothing to do */ | |
3019 | }
3020 | ||
d33a73c8 AA |
3021 | static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask) |
3022 | { | |
3023 | u8 __iomem *base = get_hwbase(dev); | |
3024 | int i; | |
3025 | u32 msixmap = 0; | |
3026 | ||
3027 | /* Each interrupt bit can be mapped to a MSIX vector (4 bits). | |
3028 | * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents | |
3029 | * the remaining 8 interrupts. | |
3030 | */ | |
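/* Worked example (illustrative): with vector = 2 and irqmask = 0x0005
 * (interrupt bits 0 and 2 set), the first loop below builds
 * msixmap = (2 << 0) | (2 << 8) = 0x00000202 and ORs it into NvRegMSIXMap0;
 * bits 8..15 of irqmask are packed the same way into NvRegMSIXMap1 by the
 * second loop. */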
3031 | for (i = 0; i < 8; i++) { | |
3032 | if ((irqmask >> i) & 0x1) { | |
3033 | msixmap |= vector << (i << 2); | |
3034 | } | |
3035 | } | |
3036 | writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0); | |
3037 | ||
3038 | msixmap = 0; | |
3039 | for (i = 0; i < 8; i++) { | |
3040 | if ((irqmask >> (i + 8)) & 0x1) { | |
3041 | msixmap |= vector << (i << 2); | |
3042 | } | |
3043 | } | |
3044 | writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1); | |
3045 | } | |
3046 | ||
84b3932b AA |
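/* Interrupt setup falls back in this order: MSI-X (one vector each for rx,
 * tx and link/other in throughput mode, or a single shared vector), then
 * MSI, then the legacy INTx line from pci_dev->irq. */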
3047 | static int nv_request_irq(struct net_device *dev) |
3048 | { | |
3049 | struct fe_priv *np = get_nvpriv(dev); | |
3050 | u8 __iomem *base = get_hwbase(dev); | |
3051 | int ret = 1; | |
3052 | int i; | |
3053 | ||
3054 | if (np->msi_flags & NV_MSI_X_CAPABLE) { | |
3055 | for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { | |
3056 | np->msi_x_entry[i].entry = i; | |
3057 | } | |
3058 | if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) { | |
3059 | np->msi_flags |= NV_MSI_X_ENABLED; | |
3060 | if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) { | |
3061 | /* Request irq for rx handling */ | |
3062 | if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) { | |
3063 | printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret); | |
3064 | pci_disable_msix(np->pci_dev); | |
3065 | np->msi_flags &= ~NV_MSI_X_ENABLED; | |
3066 | goto out_err; | |
3067 | } | |
3068 | /* Request irq for tx handling */ | |
3069 | if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) { | |
3070 | printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret); | |
3071 | pci_disable_msix(np->pci_dev); | |
3072 | np->msi_flags &= ~NV_MSI_X_ENABLED; | |
3073 | goto out_free_rx; | |
3074 | } | |
3075 | /* Request irq for link and timer handling */ | |
3076 | if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) { | |
3077 | printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret); | |
3078 | pci_disable_msix(np->pci_dev); | |
3079 | np->msi_flags &= ~NV_MSI_X_ENABLED; | |
3080 | goto out_free_tx; | |
3081 | } | |
3082 | /* map interrupts to their respective vector */ | |
3083 | writel(0, base + NvRegMSIXMap0); | |
3084 | writel(0, base + NvRegMSIXMap1); | |
3085 | set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL); | |
3086 | set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL); | |
3087 | set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); | |
3088 | } else { | |
3089 | /* Request irq for all interrupts */ | |
3090 | if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) { | |
3091 | printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); | |
3092 | pci_disable_msix(np->pci_dev); | |
3093 | np->msi_flags &= ~NV_MSI_X_ENABLED; | |
3094 | goto out_err; | |
3095 | } | |
3096 | ||
3097 | /* map interrupts to vector 0 */ | |
3098 | writel(0, base + NvRegMSIXMap0); | |
3099 | writel(0, base + NvRegMSIXMap1); | |
3100 | } | |
3101 | } | |
3102 | } | |
3103 | if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { | |
3104 | if ((ret = pci_enable_msi(np->pci_dev)) == 0) { | |
3105 | np->msi_flags |= NV_MSI_ENABLED; | |
3106 | if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) { | |
3107 | printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); | |
3108 | pci_disable_msi(np->pci_dev); | |
3109 | np->msi_flags &= ~NV_MSI_ENABLED; | |
3110 | goto out_err; | |
3111 | } | |
3112 | ||
3113 | /* map interrupts to vector 0 */ | |
3114 | writel(0, base + NvRegMSIMap0); | |
3115 | writel(0, base + NvRegMSIMap1); | |
3116 | /* enable msi vector 0 */ | |
3117 | writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); | |
3118 | } | |
3119 | } | |
3120 | if (ret != 0) { | |
3121 | if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) | |
3122 | goto out_err; | |
3123 | } | |
3124 | ||
3125 | return 0; | |
3126 | out_free_tx: | |
3127 | free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev); | |
3128 | out_free_rx: | |
3129 | free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev); | |
3130 | out_err: | |
3131 | return 1; | |
3132 | } | |
3133 | ||
3134 | static void nv_free_irq(struct net_device *dev) | |
3135 | { | |
3136 | struct fe_priv *np = get_nvpriv(dev); | |
3137 | int i; | |
3138 | ||
3139 | if (np->msi_flags & NV_MSI_X_ENABLED) { | |
3140 | for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { | |
3141 | free_irq(np->msi_x_entry[i].vector, dev); | |
3142 | } | |
3143 | pci_disable_msix(np->pci_dev); | |
3144 | np->msi_flags &= ~NV_MSI_X_ENABLED; | |
3145 | } else { | |
3146 | free_irq(np->pci_dev->irq, dev); | |
3147 | if (np->msi_flags & NV_MSI_ENABLED) { | |
3148 | pci_disable_msi(np->pci_dev); | |
3149 | np->msi_flags &= ~NV_MSI_ENABLED; | |
3150 | } | |
3151 | } | |
3152 | } | |
3153 | ||
1da177e4 LT |
3154 | static int nv_open(struct net_device *dev) |
3155 | { | |
ac9c1897 | 3156 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 | 3157 | u8 __iomem *base = get_hwbase(dev); |
d33a73c8 AA |
3158 | int ret = 1; |
3159 | int oom, i; | |
1da177e4 LT |
3160 | |
3161 | dprintk(KERN_DEBUG "nv_open: begin\n"); | |
3162 | ||
3163 | /* 1) erase previous misconfiguration */ | |
86a0f043 AA |
3164 | if (np->driver_data & DEV_HAS_POWER_CNTRL) |
3165 | nv_mac_reset(dev); | |
1da177e4 LT |
3166 | /* 4.1-1: stop adapter: ignored, 4.3 seems to be overkill */ |
3167 | writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); | |
3168 | writel(0, base + NvRegMulticastAddrB); | |
3169 | writel(0, base + NvRegMulticastMaskA); | |
3170 | writel(0, base + NvRegMulticastMaskB); | |
3171 | writel(0, base + NvRegPacketFilterFlags); | |
3172 | ||
3173 | writel(0, base + NvRegTransmitterControl); | |
3174 | writel(0, base + NvRegReceiverControl); | |
3175 | ||
3176 | writel(0, base + NvRegAdapterControl); | |
3177 | ||
eb91f61b AA |
3178 | if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) |
3179 | writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); | |
3180 | ||
1da177e4 | 3181 | /* 2) initialize descriptor rings */ |
d81c0983 | 3182 | set_bufsize(dev); |
1da177e4 LT |
3183 | oom = nv_init_ring(dev); |
3184 | ||
3185 | writel(0, base + NvRegLinkSpeed); | |
3186 | writel(0, base + NvRegUnknownTransmitterReg); | |
3187 | nv_txrx_reset(dev); | |
3188 | writel(0, base + NvRegUnknownSetupReg6); | |
3189 | ||
3190 | np->in_shutdown = 0; | |
3191 | ||
3192 | /* 3) set mac address */ | |
72b31782 | 3193 | nv_copy_mac_to_hw(dev); |
1da177e4 LT |
3194 | |
3195 | /* 4) give hw rings */ | |
0832b25a | 3196 | setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); |
eafa59f6 | 3197 | writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), |
1da177e4 LT |
3198 | base + NvRegRingSizes); |
3199 | ||
3200 | /* 5) continue setup */ | |
3201 | writel(np->linkspeed, base + NvRegLinkSpeed); | |
3202 | writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3); | |
8a4ae7f2 | 3203 | writel(np->txrxctl_bits, base + NvRegTxRxControl); |
ee407b02 | 3204 | writel(np->vlanctl_bits, base + NvRegVlanControl); |
1da177e4 | 3205 | pci_push(base); |
8a4ae7f2 | 3206 | writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl); |
1da177e4 LT |
3207 | reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31, |
3208 | NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX, | |
3209 | KERN_INFO "open: SetupReg5, Bit 31 remained off\n"); | |
3210 | ||
3211 | writel(0, base + NvRegUnknownSetupReg4); | |
3212 | writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); | |
3213 | writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus); | |
3214 | ||
3215 | /* 6) continue setup */ | |
3216 | writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1); | |
3217 | writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus); | |
3218 | writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags); | |
d81c0983 | 3219 | writel(np->rx_buf_sz, base + NvRegOffloadConfig); |
1da177e4 LT |
3220 | |
3221 | writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus); | |
3222 | get_random_bytes(&i, sizeof(i)); | |
3223 | writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed); | |
3224 | writel(NVREG_UNKSETUP1_VAL, base + NvRegUnknownSetupReg1); | |
3225 | writel(NVREG_UNKSETUP2_VAL, base + NvRegUnknownSetupReg2); | |
a971c324 AA |
3226 | if (poll_interval == -1) { |
3227 | if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) | |
3228 | writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval); | |
3229 | else | |
3230 | writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); | |
3231 | } | |
3232 | else | |
3233 | writel(poll_interval & 0xFFFF, base + NvRegPollingInterval); | |
1da177e4 LT |
3234 | writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); |
3235 | writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING, | |
3236 | base + NvRegAdapterControl); | |
3237 | writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed); | |
3238 | writel(NVREG_UNKSETUP4_VAL, base + NvRegUnknownSetupReg4); | |
3239 | writel(NVREG_WAKEUPFLAGS_VAL, base + NvRegWakeUpFlags); | |
3240 | ||
3241 | i = readl(base + NvRegPowerState); | |
3242 | if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0) | |
3243 | writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState); | |
3244 | ||
3245 | pci_push(base); | |
3246 | udelay(10); | |
3247 | writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState); | |
3248 | ||
84b3932b | 3249 | nv_disable_hw_interrupts(dev, np->irqmask); |
1da177e4 LT |
3250 | pci_push(base); |
3251 | writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus); | |
3252 | writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); | |
3253 | pci_push(base); | |
3254 | ||
84b3932b AA |
3255 | if (nv_request_irq(dev)) { |
3256 | goto out_drain; | |
d33a73c8 | 3257 | } |
1da177e4 LT |
3258 | |
3259 | /* ask for interrupts */ | |
84b3932b | 3260 | nv_enable_hw_interrupts(dev, np->irqmask); |
1da177e4 LT |
3261 | |
3262 | spin_lock_irq(&np->lock); | |
3263 | writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); | |
3264 | writel(0, base + NvRegMulticastAddrB); | |
3265 | writel(0, base + NvRegMulticastMaskA); | |
3266 | writel(0, base + NvRegMulticastMaskB); | |
3267 | writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); | |
3268 | /* One manual link speed update: Interrupts are enabled, future link | |
3269 | * speed changes cause interrupts and are handled by nv_link_irq(). | |
3270 | */ | |
3271 | { | |
3272 | u32 miistat; | |
3273 | miistat = readl(base + NvRegMIIStatus); | |
3274 | writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); | |
3275 | dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat); | |
3276 | } | |
1b1b3c9b MS |
3277 | /* set linkspeed to invalid value, thus force nv_update_linkspeed |
3278 | * to init hw */ | |
3279 | np->linkspeed = 0; | |
1da177e4 LT |
3280 | ret = nv_update_linkspeed(dev); |
3281 | nv_start_rx(dev); | |
3282 | nv_start_tx(dev); | |
3283 | netif_start_queue(dev); | |
3284 | if (ret) { | |
3285 | netif_carrier_on(dev); | |
3286 | } else { | |
3287 | printk("%s: no link during initialization.\n", dev->name); | |
3288 | netif_carrier_off(dev); | |
3289 | } | |
3290 | if (oom) | |
3291 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | |
3292 | spin_unlock_irq(&np->lock); | |
3293 | ||
3294 | return 0; | |
3295 | out_drain: | |
3296 | drain_ring(dev); | |
3297 | return ret; | |
3298 | } | |
3299 | ||
3300 | static int nv_close(struct net_device *dev) | |
3301 | { | |
ac9c1897 | 3302 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 LT |
3303 | u8 __iomem *base; |
3304 | ||
3305 | spin_lock_irq(&np->lock); | |
3306 | np->in_shutdown = 1; | |
3307 | spin_unlock_irq(&np->lock); | |
3308 | synchronize_irq(dev->irq); | |
3309 | ||
3310 | del_timer_sync(&np->oom_kick); | |
3311 | del_timer_sync(&np->nic_poll); | |
3312 | ||
3313 | netif_stop_queue(dev); | |
3314 | spin_lock_irq(&np->lock); | |
3315 | nv_stop_tx(dev); | |
3316 | nv_stop_rx(dev); | |
3317 | nv_txrx_reset(dev); | |
3318 | ||
3319 | /* disable interrupts on the nic or we will lock up */ | |
3320 | base = get_hwbase(dev); | |
84b3932b | 3321 | nv_disable_hw_interrupts(dev, np->irqmask); |
1da177e4 LT |
3322 | pci_push(base); |
3323 | dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name); | |
3324 | ||
3325 | spin_unlock_irq(&np->lock); | |
3326 | ||
84b3932b | 3327 | nv_free_irq(dev); |
1da177e4 LT |
3328 | |
3329 | drain_ring(dev); | |
3330 | ||
3331 | if (np->wolenabled) | |
3332 | nv_start_rx(dev); | |
3333 | ||
b3df9f81 MS |
3334 | /* special op: write back the misordered MAC address - otherwise |
3335 | * the next nv_probe would see a wrong address. | |
3336 | */ | |
3337 | writel(np->orig_mac[0], base + NvRegMacAddrA); | |
3338 | writel(np->orig_mac[1], base + NvRegMacAddrB); | |
3339 | ||
1da177e4 LT |
3340 | /* FIXME: power down nic */ |
3341 | ||
3342 | return 0; | |
3343 | } | |
3344 | ||
3345 | static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) | |
3346 | { | |
3347 | struct net_device *dev; | |
3348 | struct fe_priv *np; | |
3349 | unsigned long addr; | |
3350 | u8 __iomem *base; | |
3351 | int err, i; | |
86a0f043 | 3352 | u32 powerstate; |
1da177e4 LT |
3353 | |
3354 | dev = alloc_etherdev(sizeof(struct fe_priv)); | |
3355 | err = -ENOMEM; | |
3356 | if (!dev) | |
3357 | goto out; | |
3358 | ||
ac9c1897 | 3359 | np = netdev_priv(dev); |
1da177e4 LT |
3360 | np->pci_dev = pci_dev; |
3361 | spin_lock_init(&np->lock); | |
3362 | SET_MODULE_OWNER(dev); | |
3363 | SET_NETDEV_DEV(dev, &pci_dev->dev); | |
3364 | ||
3365 | init_timer(&np->oom_kick); | |
3366 | np->oom_kick.data = (unsigned long) dev; | |
3367 | np->oom_kick.function = &nv_do_rx_refill; /* timer handler */ | |
3368 | init_timer(&np->nic_poll); | |
3369 | np->nic_poll.data = (unsigned long) dev; | |
3370 | np->nic_poll.function = &nv_do_nic_poll; /* timer handler */ | |
3371 | ||
3372 | err = pci_enable_device(pci_dev); | |
3373 | if (err) { | |
3374 | printk(KERN_INFO "forcedeth: pci_enable_dev failed (%d) for device %s\n", | |
3375 | err, pci_name(pci_dev)); | |
3376 | goto out_free; | |
3377 | } | |
3378 | ||
3379 | pci_set_master(pci_dev); | |
3380 | ||
3381 | err = pci_request_regions(pci_dev, DRV_NAME); | |
3382 | if (err < 0) | |
3383 | goto out_disable; | |
3384 | ||
86a0f043 AA |
3385 | if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL)) |
3386 | np->register_size = NV_PCI_REGSZ_VER2; | |
3387 | else | |
3388 | np->register_size = NV_PCI_REGSZ_VER1; | |
3389 | ||
1da177e4 LT |
3390 | err = -EINVAL; |
3391 | addr = 0; | |
3392 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { | |
3393 | dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n", | |
3394 | pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i), | |
3395 | pci_resource_len(pci_dev, i), | |
3396 | pci_resource_flags(pci_dev, i)); | |
3397 | if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM && | |
86a0f043 | 3398 | pci_resource_len(pci_dev, i) >= np->register_size) { |
1da177e4 LT |
3399 | addr = pci_resource_start(pci_dev, i); |
3400 | break; | |
3401 | } | |
3402 | } | |
3403 | if (i == DEVICE_COUNT_RESOURCE) { | |
3404 | printk(KERN_INFO "forcedeth: Couldn't find register window for device %s.\n", | |
3405 | pci_name(pci_dev)); | |
3406 | goto out_relreg; | |
3407 | } | |
3408 | ||
86a0f043 AA |
3409 | /* copy of driver data */ |
3410 | np->driver_data = id->driver_data; | |
3411 | ||
1da177e4 | 3412 | /* handle different descriptor versions */ |
ee73362c MS |
3413 | if (id->driver_data & DEV_HAS_HIGH_DMA) { |
3414 | /* packet format 3: supports 40-bit addressing */ | |
3415 | np->desc_ver = DESC_VER_3; | |
84b3932b | 3416 | np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; |
910638ae | 3417 | if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) { |
ee73362c MS |
3418 | printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n", |
3419 | pci_name(pci_dev)); | |
ac9c1897 | 3420 | } else { |
84b3932b AA |
3421 | dev->features |= NETIF_F_HIGHDMA; |
3422 | printk(KERN_INFO "forcedeth: using HIGHDMA\n"); | |
3423 | } | |
3424 | if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) {
3425 | printk(KERN_INFO "forcedeth: 39-bit DMA (consistent) failed for device %s.\n",
3426 | pci_name(pci_dev)); | |
ee73362c MS |
3427 | } |
3428 | } else if (id->driver_data & DEV_HAS_LARGEDESC) { | |
3429 | /* packet format 2: supports jumbo frames */ | |
1da177e4 | 3430 | np->desc_ver = DESC_VER_2; |
8a4ae7f2 | 3431 | np->txrxctl_bits = NVREG_TXRXCTL_DESC_2; |
ee73362c MS |
3432 | } else { |
3433 | /* original packet format */ | |
3434 | np->desc_ver = DESC_VER_1; | |
8a4ae7f2 | 3435 | np->txrxctl_bits = NVREG_TXRXCTL_DESC_1; |
d81c0983 | 3436 | } |
ee73362c MS |
3437 | |
3438 | np->pkt_limit = NV_PKTLIMIT_1; | |
3439 | if (id->driver_data & DEV_HAS_LARGEDESC) | |
3440 | np->pkt_limit = NV_PKTLIMIT_2; | |
3441 | ||
8a4ae7f2 MS |
3442 | if (id->driver_data & DEV_HAS_CHECKSUM) { |
3443 | np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; | |
ac9c1897 AA |
3444 | dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; |
3445 | #ifdef NETIF_F_TSO | |
fa45459e | 3446 | dev->features |= NETIF_F_TSO; |
ac9c1897 AA |
3447 | #endif |
3448 | } | |
8a4ae7f2 | 3449 | |
ee407b02 AA |
3450 | np->vlanctl_bits = 0; |
3451 | if (id->driver_data & DEV_HAS_VLAN) { | |
3452 | np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE; | |
3453 | dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX; | |
3454 | dev->vlan_rx_register = nv_vlan_rx_register; | |
3455 | dev->vlan_rx_kill_vid = nv_vlan_rx_kill_vid; | |
3456 | } | |
3457 | ||
d33a73c8 AA |
3458 | np->msi_flags = 0; |
3459 | if ((id->driver_data & DEV_HAS_MSI) && !disable_msi) { | |
3460 | np->msi_flags |= NV_MSI_CAPABLE; | |
3461 | } | |
3462 | if ((id->driver_data & DEV_HAS_MSI_X) && !disable_msix) { | |
3463 | np->msi_flags |= NV_MSI_X_CAPABLE; | |
3464 | } | |
3465 | ||
b6d0773f | 3466 | np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG; |
eb91f61b | 3467 | if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) { |
b6d0773f | 3468 | np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ; |
eb91f61b | 3469 | } |
f3b197ac | 3470 | |
eb91f61b | 3471 | |
1da177e4 | 3472 | err = -ENOMEM; |
86a0f043 | 3473 | np->base = ioremap(addr, np->register_size); |
1da177e4 LT |
3474 | if (!np->base) |
3475 | goto out_relreg; | |
3476 | dev->base_addr = (unsigned long)np->base; | |
ee73362c | 3477 | |
1da177e4 | 3478 | dev->irq = pci_dev->irq; |
ee73362c | 3479 | |
eafa59f6 AA |
3480 | np->rx_ring_size = RX_RING_DEFAULT; |
3481 | np->tx_ring_size = TX_RING_DEFAULT; | |
3482 | np->tx_limit_stop = np->tx_ring_size - TX_LIMIT_DIFFERENCE; | |
3483 | np->tx_limit_start = np->tx_ring_size - TX_LIMIT_DIFFERENCE - 1; | |
3484 | ||
ee73362c MS |
3485 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
3486 | np->rx_ring.orig = pci_alloc_consistent(pci_dev, | |
eafa59f6 | 3487 | sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), |
ee73362c MS |
3488 | &np->ring_addr); |
3489 | if (!np->rx_ring.orig) | |
3490 | goto out_unmap; | |
eafa59f6 | 3491 | np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; |
ee73362c MS |
3492 | } else { |
3493 | np->rx_ring.ex = pci_alloc_consistent(pci_dev, | |
eafa59f6 | 3494 | sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), |
ee73362c MS |
3495 | &np->ring_addr); |
3496 | if (!np->rx_ring.ex) | |
3497 | goto out_unmap; | |
eafa59f6 AA |
3498 | np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; |
3499 | } | |
3500 | np->rx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->rx_ring_size, GFP_KERNEL); | |
3501 | np->rx_dma = kmalloc(sizeof(dma_addr_t) * np->rx_ring_size, GFP_KERNEL); | |
3502 | np->tx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->tx_ring_size, GFP_KERNEL); | |
3503 | np->tx_dma = kmalloc(sizeof(dma_addr_t) * np->tx_ring_size, GFP_KERNEL); | |
3504 | np->tx_dma_len = kmalloc(sizeof(unsigned int) * np->tx_ring_size, GFP_KERNEL); | |
3505 | if (!np->rx_skbuff || !np->rx_dma || !np->tx_skbuff || !np->tx_dma || !np->tx_dma_len) | |
3506 | goto out_freering; | |
3507 | memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size); | |
3508 | memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size); | |
3509 | memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size); | |
3510 | memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size); | |
3511 | memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size); | |
1da177e4 LT |
3512 | |
3513 | dev->open = nv_open; | |
3514 | dev->stop = nv_close; | |
3515 | dev->hard_start_xmit = nv_start_xmit; | |
3516 | dev->get_stats = nv_get_stats; | |
3517 | dev->change_mtu = nv_change_mtu; | |
72b31782 | 3518 | dev->set_mac_address = nv_set_mac_address; |
1da177e4 | 3519 | dev->set_multicast_list = nv_set_multicast; |
2918c35d MS |
3520 | #ifdef CONFIG_NET_POLL_CONTROLLER |
3521 | dev->poll_controller = nv_poll_controller; | |
3522 | #endif | |
1da177e4 LT |
3523 | SET_ETHTOOL_OPS(dev, &ops); |
3524 | dev->tx_timeout = nv_tx_timeout; | |
3525 | dev->watchdog_timeo = NV_WATCHDOG_TIMEO; | |
3526 | ||
3527 | pci_set_drvdata(pci_dev, dev); | |
3528 | ||
3529 | /* read the mac address */ | |
3530 | base = get_hwbase(dev); | |
3531 | np->orig_mac[0] = readl(base + NvRegMacAddrA); | |
3532 | np->orig_mac[1] = readl(base + NvRegMacAddrB); | |
3533 | ||
3534 | dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff; | |
3535 | dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff; | |
3536 | dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff; | |
3537 | dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff; | |
3538 | dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff; | |
3539 | dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff; | |
c704b856 | 3540 | memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); |
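/* Worked example (illustrative): with NvRegMacAddrA = 0x456789ab and
 * NvRegMacAddrB = 0x00000123 the assignments above yield the address
 * 01:23:45:67:89:ab, i.e. the registers hold the MAC address in reversed
 * byte order (see the write-back in nv_close). */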
1da177e4 | 3541 | |
c704b856 | 3542 | if (!is_valid_ether_addr(dev->perm_addr)) { |
1da177e4 LT |
3543 | /* |
3544 | * Bad mac address. At least one bios sets the mac address | |
3545 | * to 01:23:45:67:89:ab | |
3546 | */ | |
3547 | printk(KERN_ERR "%s: Invalid Mac address detected: %02x:%02x:%02x:%02x:%02x:%02x\n", | |
3548 | pci_name(pci_dev), | |
3549 | dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], | |
3550 | dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); | |
3551 | printk(KERN_ERR "Please complain to your hardware vendor. Switching to a random MAC.\n"); | |
3552 | dev->dev_addr[0] = 0x00; | |
3553 | dev->dev_addr[1] = 0x00; | |
3554 | dev->dev_addr[2] = 0x6c; | |
3555 | get_random_bytes(&dev->dev_addr[3], 3); | |
3556 | } | |
3557 | ||
3558 | dprintk(KERN_DEBUG "%s: MAC Address %02x:%02x:%02x:%02x:%02x:%02x\n", pci_name(pci_dev), | |
3559 | dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], | |
3560 | dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); | |
3561 | ||
3562 | /* disable WOL */ | |
3563 | writel(0, base + NvRegWakeUpFlags); | |
3564 | np->wolenabled = 0; | |
3565 | ||
86a0f043 AA |
3566 | if (id->driver_data & DEV_HAS_POWER_CNTRL) { |
3567 | u8 revision_id; | |
3568 | pci_read_config_byte(pci_dev, PCI_REVISION_ID, &revision_id); | |
3569 | ||
3570 | /* take phy and nic out of low power mode */ | |
3571 | powerstate = readl(base + NvRegPowerState2); | |
3572 | powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK; | |
3573 | if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 || | |
3574 | id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) && | |
3575 | revision_id >= 0xA3) | |
3576 | powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3; | |
3577 | writel(powerstate, base + NvRegPowerState2); | |
3578 | } | |
3579 | ||
1da177e4 | 3580 | if (np->desc_ver == DESC_VER_1) { |
ac9c1897 | 3581 | np->tx_flags = NV_TX_VALID; |
1da177e4 | 3582 | } else { |
ac9c1897 | 3583 | np->tx_flags = NV_TX2_VALID; |
1da177e4 | 3584 | } |
d33a73c8 | 3585 | if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) { |
a971c324 | 3586 | np->irqmask = NVREG_IRQMASK_THROUGHPUT; |
d33a73c8 AA |
3587 | if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ |
3588 | np->msi_flags |= 0x0003; | |
3589 | } else { | |
a971c324 | 3590 | np->irqmask = NVREG_IRQMASK_CPU; |
d33a73c8 AA |
3591 | if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ |
3592 | np->msi_flags |= 0x0001; | |
3593 | } | |
a971c324 | 3594 | |
1da177e4 LT |
3595 | if (id->driver_data & DEV_NEED_TIMERIRQ) |
3596 | np->irqmask |= NVREG_IRQ_TIMER; | |
3597 | if (id->driver_data & DEV_NEED_LINKTIMER) { | |
3598 | dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev)); | |
3599 | np->need_linktimer = 1; | |
3600 | np->link_timeout = jiffies + LINK_TIMEOUT; | |
3601 | } else { | |
3602 | dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev)); | |
3603 | np->need_linktimer = 0; | |
3604 | } | |
3605 | ||
3606 | /* find a suitable phy */ | |
7a33e45a | 3607 | for (i = 1; i <= 32; i++) { |
1da177e4 | 3608 | int id1, id2; |
7a33e45a | 3609 | int phyaddr = i & 0x1F; |
1da177e4 LT |
3610 | |
3611 | spin_lock_irq(&np->lock); | |
7a33e45a | 3612 | id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ); |
1da177e4 LT |
3613 | spin_unlock_irq(&np->lock); |
3614 | if (id1 < 0 || id1 == 0xffff) | |
3615 | continue; | |
3616 | spin_lock_irq(&np->lock); | |
7a33e45a | 3617 | id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ); |
1da177e4 LT |
3618 | spin_unlock_irq(&np->lock); |
3619 | if (id2 < 0 || id2 == 0xffff) | |
3620 | continue; | |
3621 | ||
3622 | id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT; | |
3623 | id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT; | |
3624 | dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n", | |
7a33e45a AA |
3625 | pci_name(pci_dev), id1, id2, phyaddr); |
3626 | np->phyaddr = phyaddr; | |
1da177e4 LT |
3627 | np->phy_oui = id1 | id2; |
3628 | break; | |
3629 | } | |
7a33e45a | 3630 | if (i == 33) { |
1da177e4 | 3631 | printk(KERN_INFO "%s: open: Could not find a valid PHY.\n", |
7a33e45a | 3632 | pci_name(pci_dev)); |
eafa59f6 | 3633 | goto out_error; |
1da177e4 | 3634 | } |
f3b197ac | 3635 | |
7a33e45a AA |
3636 | /* reset it */ |
3637 | phy_init(dev); | |
1da177e4 LT |
3638 | |
3639 | /* set default link speed settings */ | |
3640 | np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; | |
3641 | np->duplex = 0; | |
3642 | np->autoneg = 1; | |
3643 | ||
3644 | err = register_netdev(dev); | |
3645 | if (err) { | |
3646 | printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err); | |
eafa59f6 | 3647 | goto out_error; |
1da177e4 LT |
3648 | } |
3649 | printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n", | |
3650 | dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device, | |
3651 | pci_name(pci_dev)); | |
3652 | ||
3653 | return 0; | |
3654 | ||
eafa59f6 | 3655 | out_error: |
1da177e4 | 3656 | pci_set_drvdata(pci_dev, NULL); |
eafa59f6 AA |
3657 | out_freering: |
3658 | free_rings(dev); | |
1da177e4 LT |
3659 | out_unmap: |
3660 | iounmap(get_hwbase(dev)); | |
3661 | out_relreg: | |
3662 | pci_release_regions(pci_dev); | |
3663 | out_disable: | |
3664 | pci_disable_device(pci_dev); | |
3665 | out_free: | |
3666 | free_netdev(dev); | |
3667 | out: | |
3668 | return err; | |
3669 | } | |
3670 | ||
3671 | static void __devexit nv_remove(struct pci_dev *pci_dev) | |
3672 | { | |
3673 | struct net_device *dev = pci_get_drvdata(pci_dev); | |
1da177e4 LT |
3674 | |
3675 | unregister_netdev(dev); | |
3676 | ||
1da177e4 | 3677 | /* free all structures */ |
eafa59f6 | 3678 | free_rings(dev); |
1da177e4 LT |
3679 | iounmap(get_hwbase(dev)); |
3680 | pci_release_regions(pci_dev); | |
3681 | pci_disable_device(pci_dev); | |
3682 | free_netdev(dev); | |
3683 | pci_set_drvdata(pci_dev, NULL); | |
3684 | } | |
3685 | ||
3686 | static struct pci_device_id pci_tbl[] = { | |
3687 | { /* nForce Ethernet Controller */ | |
dc8216c1 | 3688 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1), |
c2dba06d | 3689 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, |
1da177e4 LT |
3690 | }, |
3691 | { /* nForce2 Ethernet Controller */ | |
dc8216c1 | 3692 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2), |
c2dba06d | 3693 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, |
1da177e4 LT |
3694 | }, |
3695 | { /* nForce3 Ethernet Controller */ | |
dc8216c1 | 3696 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3), |
c2dba06d | 3697 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, |
1da177e4 LT |
3698 | }, |
3699 | { /* nForce3 Ethernet Controller */ | |
dc8216c1 | 3700 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4), |
8a4ae7f2 | 3701 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, |
1da177e4 LT |
3702 | }, |
3703 | { /* nForce3 Ethernet Controller */ | |
dc8216c1 | 3704 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5), |
8a4ae7f2 | 3705 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, |
1da177e4 LT |
3706 | }, |
3707 | { /* nForce3 Ethernet Controller */ | |
dc8216c1 | 3708 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6), |
8a4ae7f2 | 3709 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, |
1da177e4 LT |
3710 | }, |
3711 | { /* nForce3 Ethernet Controller */ | |
dc8216c1 | 3712 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7), |
8a4ae7f2 | 3713 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, |
1da177e4 LT |
3714 | }, |
3715 | { /* CK804 Ethernet Controller */ | |
dc8216c1 | 3716 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8), |
8a4ae7f2 | 3717 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, |
1da177e4 LT |
3718 | }, |
3719 | { /* CK804 Ethernet Controller */ | |
dc8216c1 | 3720 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9), |
8a4ae7f2 | 3721 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, |
1da177e4 LT |
3722 | }, |
3723 | { /* MCP04 Ethernet Controller */ | |
dc8216c1 | 3724 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10), |
8a4ae7f2 | 3725 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, |
1da177e4 LT |
3726 | }, |
3727 | { /* MCP04 Ethernet Controller */ | |
dc8216c1 | 3728 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11), |
8a4ae7f2 | 3729 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, |
1da177e4 | 3730 | }, |
9992d4aa | 3731 | { /* MCP51 Ethernet Controller */ |
dc8216c1 | 3732 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12), |
86a0f043 | 3733 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL, |
9992d4aa MS |
3734 | }, |
3735 | { /* MCP51 Ethernet Controller */ | |
dc8216c1 | 3736 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13), |
86a0f043 | 3737 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL, |
9992d4aa | 3738 | }, |
f49d16ef | 3739 | { /* MCP55 Ethernet Controller */ |
dc8216c1 | 3740 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14), |
eb91f61b | 3741 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX, |
f49d16ef MS |
3742 | }, |
3743 | { /* MCP55 Ethernet Controller */ | |
dc8216c1 | 3744 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15), |
eb91f61b | 3745 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX, |
f49d16ef | 3746 | }, |
1da177e4 LT |
3747 | {0,}, |
3748 | }; | |
3749 | ||
3750 | static struct pci_driver driver = { | |
3751 | .name = "forcedeth", | |
3752 | .id_table = pci_tbl, | |
3753 | .probe = nv_probe, | |
3754 | .remove = __devexit_p(nv_remove), | |
3755 | }; | |
3756 | ||
3757 | ||
3758 | static int __init init_nic(void) | |
3759 | { | |
3760 | printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION); | |
3761 | return pci_module_init(&driver); | |
3762 | } | |
3763 | ||
3764 | static void __exit exit_nic(void) | |
3765 | { | |
3766 | pci_unregister_driver(&driver); | |
3767 | } | |
3768 | ||
3769 | module_param(max_interrupt_work, int, 0); | |
3770 | MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt"); | |
a971c324 AA |
3771 | module_param(optimization_mode, int, 0); |
3772 | MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer."); | |
3773 | module_param(poll_interval, int, 0); | |
3774 | MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535."); | |
d33a73c8 AA |
3775 | module_param(disable_msi, int, 0); |
3776 | MODULE_PARM_DESC(disable_msi, "Disable MSI interrupts by setting to 1."); | |
3777 | module_param(disable_msix, int, 0); | |
3778 | MODULE_PARM_DESC(disable_msix, "Disable MSIX interrupts by setting to 1."); | |
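/* Illustrative usage of the parameters above (values are examples only):
 *   modprobe forcedeth optimization_mode=1 poll_interval=100 disable_msi=1
 */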
1da177e4 LT |
3779 | |
3780 | MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>"); | |
3781 | MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver"); | |
3782 | MODULE_LICENSE("GPL"); | |
3783 | ||
3784 | MODULE_DEVICE_TABLE(pci, pci_tbl); | |
3785 | ||
3786 | module_init(init_nic); | |
3787 | module_exit(exit_nic); |