Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / net / e100.c
CommitLineData
1da177e4
LT
1/*******************************************************************************
2
0abb6eb1
AK
3 Intel PRO/100 Linux driver
4 Copyright(c) 1999 - 2006 Intel Corporation.
05479938
JB
5
6 This program is free software; you can redistribute it and/or modify it
0abb6eb1
AK
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
05479938 9
0abb6eb1 10 This program is distributed in the hope it will be useful, but WITHOUT
05479938
JB
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
1da177e4 13 more details.
05479938 14
1da177e4 15 You should have received a copy of the GNU General Public License along with
0abb6eb1
AK
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
05479938 18
0abb6eb1
AK
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
05479938 21
1da177e4
LT
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
0abb6eb1 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
1da177e4
LT
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29/*
30 * e100.c: Intel(R) PRO/100 ethernet driver
31 *
32 * (Re)written 2003 by scott.feldman@intel.com. Based loosely on
33 * original e100 driver, but better described as a munging of
34 * e100, e1000, eepro100, tg3, 8139cp, and other drivers.
35 *
36 * References:
37 * Intel 8255x 10/100 Mbps Ethernet Controller Family,
38 * Open Source Software Developers Manual,
39 * http://sourceforge.net/projects/e1000
40 *
41 *
42 * Theory of Operation
43 *
44 * I. General
45 *
46 * The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet
47 * controller family, which includes the 82557, 82558, 82559, 82550,
48 * 82551, and 82562 devices. 82558 and greater controllers
49 * integrate the Intel 82555 PHY. The controllers are used in
50 * server and client network interface cards, as well as in
51 * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
52 * configurations. 8255x supports a 32-bit linear addressing
53 * mode and operates at 33Mhz PCI clock rate.
54 *
55 * II. Driver Operation
56 *
57 * Memory-mapped mode is used exclusively to access the device's
58 * shared-memory structure, the Control/Status Registers (CSR). All
59 * setup, configuration, and control of the device, including queuing
60 * of Tx, Rx, and configuration commands is through the CSR.
61 * cmd_lock serializes accesses to the CSR command register. cb_lock
62 * protects the shared Command Block List (CBL).
63 *
64 * 8255x is highly MII-compliant and all access to the PHY go
65 * through the Management Data Interface (MDI). Consequently, the
66 * driver leverages the mii.c library shared with other MII-compliant
67 * devices.
68 *
69 * Big- and Little-Endian byte order as well as 32- and 64-bit
70 * archs are supported. Weak-ordered memory and non-cache-coherent
71 * archs are supported.
72 *
73 * III. Transmit
74 *
75 * A Tx skb is mapped and hangs off of a TCB. TCBs are linked
76 * together in a fixed-size ring (CBL) thus forming the flexible mode
77 * memory structure. A TCB marked with the suspend-bit indicates
78 * the end of the ring. The last TCB processed suspends the
79 * controller, and the controller can be restarted by issue a CU
80 * resume command to continue from the suspend point, or a CU start
81 * command to start at a given position in the ring.
82 *
83 * Non-Tx commands (config, multicast setup, etc) are linked
84 * into the CBL ring along with Tx commands. The common structure
85 * used for both Tx and non-Tx commands is the Command Block (CB).
86 *
87 * cb_to_use is the next CB to use for queuing a command; cb_to_clean
88 * is the next CB to check for completion; cb_to_send is the first
89 * CB to start on in case of a previous failure to resume. CB clean
90 * up happens in interrupt context in response to a CU interrupt.
91 * cbs_avail keeps track of number of free CB resources available.
92 *
93 * Hardware padding of short packets to minimum packet size is
94 * enabled. 82557 pads with 7Eh, while the later controllers pad
95 * with 00h.
96 *
0a0863af 97 * IV. Receive
1da177e4
LT
98 *
99 * The Receive Frame Area (RFA) comprises a ring of Receive Frame
100 * Descriptors (RFD) + data buffer, thus forming the simplified mode
101 * memory structure. Rx skbs are allocated to contain both the RFD
102 * and the data buffer, but the RFD is pulled off before the skb is
103 * indicated. The data buffer is aligned such that encapsulated
104 * protocol headers are u32-aligned. Since the RFD is part of the
105 * mapped shared memory, and completion status is contained within
106 * the RFD, the RFD must be dma_sync'ed to maintain a consistent
107 * view from software and hardware.
108 *
7734f6e6
DA
109 * In order to keep updates to the RFD link field from colliding with
110 * hardware writes to mark packets complete, we use the feature that
111 * hardware will not write to a size 0 descriptor and mark the previous
112 * packet as end-of-list (EL). After updating the link, we remove EL
113 * and only then restore the size such that hardware may use the
114 * previous-to-end RFD.
115 *
1da177e4
LT
116 * Under typical operation, the receive unit (RU) is start once,
117 * and the controller happily fills RFDs as frames arrive. If
118 * replacement RFDs cannot be allocated, or the RU goes non-active,
119 * the RU must be restarted. Frame arrival generates an interrupt,
120 * and Rx indication and re-allocation happen in the same context,
121 * therefore no locking is required. A software-generated interrupt
122 * is generated from the watchdog to recover from a failed allocation
0a0863af 123 * scenario where all Rx resources have been indicated and none re-
1da177e4
LT
124 * placed.
125 *
126 * V. Miscellaneous
127 *
128 * VLAN offloading of tagging, stripping and filtering is not
129 * supported, but driver will accommodate the extra 4-byte VLAN tag
130 * for processing by upper layers. Tx/Rx Checksum offloading is not
131 * supported. Tx Scatter/Gather is not supported. Jumbo Frames is
132 * not supported (hardware limitation).
133 *
134 * MagicPacket(tm) WoL support is enabled/disabled via ethtool.
135 *
136 * Thanks to JC (jchapman@katalix.com) for helping with
137 * testing/troubleshooting the development driver.
138 *
139 * TODO:
140 * o several entry points race with dev->close
141 * o check for tx-no-resources/stop Q races with tx clean/wake Q
ac7c6669
OM
142 *
143 * FIXES:
144 * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
145 * - Stratus87247: protect MDI control register manipulations
72001762
AM
146 * 2009/06/01 - Andreas Mohr <andi at lisas dot de>
147 * - add clean lowlevel I/O emulation for cards with MII-lacking PHYs
1da177e4
LT
148 */
149
1da177e4
LT
150#include <linux/module.h>
151#include <linux/moduleparam.h>
152#include <linux/kernel.h>
153#include <linux/types.h>
d43c36dc 154#include <linux/sched.h>
1da177e4
LT
155#include <linux/slab.h>
156#include <linux/delay.h>
157#include <linux/init.h>
158#include <linux/pci.h>
1e7f0bd8 159#include <linux/dma-mapping.h>
98468efd 160#include <linux/dmapool.h>
1da177e4
LT
161#include <linux/netdevice.h>
162#include <linux/etherdevice.h>
163#include <linux/mii.h>
164#include <linux/if_vlan.h>
165#include <linux/skbuff.h>
166#include <linux/ethtool.h>
167#include <linux/string.h>
9ac32e1b 168#include <linux/firmware.h>
401da6ae 169#include <linux/rtnetlink.h>
1da177e4
LT
170#include <asm/unaligned.h>
171
172
173#define DRV_NAME "e100"
4e1dc97d 174#define DRV_EXT "-NAPI"
b55de80e 175#define DRV_VERSION "3.5.24-k2"DRV_EXT
1da177e4 176#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
4e1dc97d 177#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"
1da177e4
LT
178#define PFX DRV_NAME ": "
179
180#define E100_WATCHDOG_PERIOD (2 * HZ)
181#define E100_NAPI_WEIGHT 16
182
9ac32e1b
JSR
183#define FIRMWARE_D101M "e100/d101m_ucode.bin"
184#define FIRMWARE_D101S "e100/d101s_ucode.bin"
185#define FIRMWARE_D102E "e100/d102e_ucode.bin"
186
1da177e4
LT
187MODULE_DESCRIPTION(DRV_DESCRIPTION);
188MODULE_AUTHOR(DRV_COPYRIGHT);
189MODULE_LICENSE("GPL");
190MODULE_VERSION(DRV_VERSION);
9ac32e1b
JSR
191MODULE_FIRMWARE(FIRMWARE_D101M);
192MODULE_FIRMWARE(FIRMWARE_D101S);
193MODULE_FIRMWARE(FIRMWARE_D102E);
1da177e4
LT
194
195static int debug = 3;
8fb6f732 196static int eeprom_bad_csum_allow = 0;
27345bb6 197static int use_io = 0;
1da177e4 198module_param(debug, int, 0);
8fb6f732 199module_param(eeprom_bad_csum_allow, int, 0);
27345bb6 200module_param(use_io, int, 0);
1da177e4 201MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
8fb6f732 202MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
27345bb6 203MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
1da177e4
LT
204#define DPRINTK(nlevel, klevel, fmt, args...) \
205 (void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
206 printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
b39d66a8 207 __func__ , ## args))
1da177e4
LT
208
209#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
210 PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
211 PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
a3aa1884 212static DEFINE_PCI_DEVICE_TABLE(e100_id_table) = {
1da177e4
LT
213 INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
214 INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
215 INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
216 INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
217 INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
218 INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
219 INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
220 INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
221 INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
222 INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
223 INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
224 INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
225 INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
226 INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
227 INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
228 INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
229 INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
230 INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
231 INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
232 INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
233 INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
234 INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
235 INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
236 INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
237 INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
238 INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
239 INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
240 INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
241 INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
242 INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
042e2fb7
MC
243 INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
244 INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
245 INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
246 INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
247 INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
b55de80e 248 INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7),
1da177e4
LT
249 INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
250 INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
251 INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
252 INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
253 INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
042e2fb7 254 INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
1da177e4
LT
255 { 0, }
256};
257MODULE_DEVICE_TABLE(pci, e100_id_table);
258
259enum mac {
260 mac_82557_D100_A = 0,
261 mac_82557_D100_B = 1,
262 mac_82557_D100_C = 2,
263 mac_82558_D101_A4 = 4,
264 mac_82558_D101_B0 = 5,
265 mac_82559_D101M = 8,
266 mac_82559_D101S = 9,
267 mac_82550_D102 = 12,
268 mac_82550_D102_C = 13,
269 mac_82551_E = 14,
270 mac_82551_F = 15,
271 mac_82551_10 = 16,
272 mac_unknown = 0xFF,
273};
274
275enum phy {
276 phy_100a = 0x000003E0,
277 phy_100c = 0x035002A8,
278 phy_82555_tx = 0x015002A8,
279 phy_nsc_tx = 0x5C002000,
280 phy_82562_et = 0x033002A8,
281 phy_82562_em = 0x032002A8,
282 phy_82562_ek = 0x031002A8,
283 phy_82562_eh = 0x017002A8,
b55de80e 284 phy_82552_v = 0xd061004d,
1da177e4
LT
285 phy_unknown = 0xFFFFFFFF,
286};
287
288/* CSR (Control/Status Registers) */
289struct csr {
290 struct {
291 u8 status;
292 u8 stat_ack;
293 u8 cmd_lo;
294 u8 cmd_hi;
295 u32 gen_ptr;
296 } scb;
297 u32 port;
298 u16 flash_ctrl;
299 u8 eeprom_ctrl_lo;
300 u8 eeprom_ctrl_hi;
301 u32 mdi_ctrl;
302 u32 rx_dma_count;
303};
304
305enum scb_status {
7734f6e6 306 rus_no_res = 0x08,
1da177e4
LT
307 rus_ready = 0x10,
308 rus_mask = 0x3C,
309};
310
ca93ca42
JG
311enum ru_state {
312 RU_SUSPENDED = 0,
313 RU_RUNNING = 1,
314 RU_UNINITIALIZED = -1,
315};
316
1da177e4
LT
317enum scb_stat_ack {
318 stat_ack_not_ours = 0x00,
319 stat_ack_sw_gen = 0x04,
320 stat_ack_rnr = 0x10,
321 stat_ack_cu_idle = 0x20,
322 stat_ack_frame_rx = 0x40,
323 stat_ack_cu_cmd_done = 0x80,
324 stat_ack_not_present = 0xFF,
325 stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
326 stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
327};
328
329enum scb_cmd_hi {
330 irq_mask_none = 0x00,
331 irq_mask_all = 0x01,
332 irq_sw_gen = 0x02,
333};
334
335enum scb_cmd_lo {
336 cuc_nop = 0x00,
337 ruc_start = 0x01,
338 ruc_load_base = 0x06,
339 cuc_start = 0x10,
340 cuc_resume = 0x20,
341 cuc_dump_addr = 0x40,
342 cuc_dump_stats = 0x50,
343 cuc_load_base = 0x60,
344 cuc_dump_reset = 0x70,
345};
346
347enum cuc_dump {
348 cuc_dump_complete = 0x0000A005,
349 cuc_dump_reset_complete = 0x0000A007,
350};
05479938 351
1da177e4
LT
352enum port {
353 software_reset = 0x0000,
354 selftest = 0x0001,
355 selective_reset = 0x0002,
356};
357
358enum eeprom_ctrl_lo {
359 eesk = 0x01,
360 eecs = 0x02,
361 eedi = 0x04,
362 eedo = 0x08,
363};
364
365enum mdi_ctrl {
366 mdi_write = 0x04000000,
367 mdi_read = 0x08000000,
368 mdi_ready = 0x10000000,
369};
370
371enum eeprom_op {
372 op_write = 0x05,
373 op_read = 0x06,
374 op_ewds = 0x10,
375 op_ewen = 0x13,
376};
377
378enum eeprom_offsets {
379 eeprom_cnfg_mdix = 0x03,
72001762 380 eeprom_phy_iface = 0x06,
1da177e4
LT
381 eeprom_id = 0x0A,
382 eeprom_config_asf = 0x0D,
383 eeprom_smbus_addr = 0x90,
384};
385
386enum eeprom_cnfg_mdix {
387 eeprom_mdix_enabled = 0x0080,
388};
389
72001762
AM
390enum eeprom_phy_iface {
391 NoSuchPhy = 0,
392 I82553AB,
393 I82553C,
394 I82503,
395 DP83840,
396 S80C240,
397 S80C24,
398 I82555,
399 DP83840A = 10,
400};
401
1da177e4
LT
402enum eeprom_id {
403 eeprom_id_wol = 0x0020,
404};
405
406enum eeprom_config_asf {
407 eeprom_asf = 0x8000,
408 eeprom_gcl = 0x4000,
409};
410
411enum cb_status {
412 cb_complete = 0x8000,
413 cb_ok = 0x2000,
414};
415
416enum cb_command {
417 cb_nop = 0x0000,
418 cb_iaaddr = 0x0001,
419 cb_config = 0x0002,
420 cb_multi = 0x0003,
421 cb_tx = 0x0004,
422 cb_ucode = 0x0005,
423 cb_dump = 0x0006,
424 cb_tx_sf = 0x0008,
425 cb_cid = 0x1f00,
426 cb_i = 0x2000,
427 cb_s = 0x4000,
428 cb_el = 0x8000,
429};
430
431struct rfd {
aaf918ba
AV
432 __le16 status;
433 __le16 command;
434 __le32 link;
435 __le32 rbd;
436 __le16 actual_size;
437 __le16 size;
1da177e4
LT
438};
439
440struct rx {
441 struct rx *next, *prev;
442 struct sk_buff *skb;
443 dma_addr_t dma_addr;
444};
445
446#if defined(__BIG_ENDIAN_BITFIELD)
447#define X(a,b) b,a
448#else
449#define X(a,b) a,b
450#endif
451struct config {
452/*0*/ u8 X(byte_count:6, pad0:2);
453/*1*/ u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
454/*2*/ u8 adaptive_ifs;
455/*3*/ u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
456 term_write_cache_line:1), pad3:4);
457/*4*/ u8 X(rx_dma_max_count:7, pad4:1);
458/*5*/ u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
459/*6*/ u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
460 tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
461 rx_discard_overruns:1), rx_save_bad_frames:1);
462/*7*/ u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
463 pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
464 tx_dynamic_tbd:1);
465/*8*/ u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
466/*9*/ u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
467 link_status_wake:1), arp_wake:1), mcmatch_wake:1);
468/*10*/ u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
469 loopback:2);
470/*11*/ u8 X(linear_priority:3, pad11:5);
471/*12*/ u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
472/*13*/ u8 ip_addr_lo;
473/*14*/ u8 ip_addr_hi;
474/*15*/ u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
475 wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
476 pad15_2:1), crs_or_cdt:1);
477/*16*/ u8 fc_delay_lo;
478/*17*/ u8 fc_delay_hi;
479/*18*/ u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
480 rx_long_ok:1), fc_priority_threshold:3), pad18:1);
481/*19*/ u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
482 fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
483 full_duplex_force:1), full_duplex_pin:1);
484/*20*/ u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
485/*21*/ u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
486/*22*/ u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
487 u8 pad_d102[9];
488};
489
490#define E100_MAX_MULTICAST_ADDRS 64
491struct multi {
aaf918ba 492 __le16 count;
1da177e4
LT
493 u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
494};
495
496/* Important: keep total struct u32-aligned */
497#define UCODE_SIZE 134
498struct cb {
aaf918ba
AV
499 __le16 status;
500 __le16 command;
501 __le32 link;
1da177e4
LT
502 union {
503 u8 iaaddr[ETH_ALEN];
aaf918ba 504 __le32 ucode[UCODE_SIZE];
1da177e4
LT
505 struct config config;
506 struct multi multi;
507 struct {
508 u32 tbd_array;
509 u16 tcb_byte_count;
510 u8 threshold;
511 u8 tbd_count;
512 struct {
aaf918ba
AV
513 __le32 buf_addr;
514 __le16 size;
1da177e4
LT
515 u16 eol;
516 } tbd;
517 } tcb;
aaf918ba 518 __le32 dump_buffer_addr;
1da177e4
LT
519 } u;
520 struct cb *next, *prev;
521 dma_addr_t dma_addr;
522 struct sk_buff *skb;
523};
524
525enum loopback {
526 lb_none = 0, lb_mac = 1, lb_phy = 3,
527};
528
529struct stats {
aaf918ba 530 __le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
1da177e4
LT
531 tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
532 tx_multiple_collisions, tx_total_collisions;
aaf918ba 533 __le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
1da177e4
LT
534 rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
535 rx_short_frame_errors;
aaf918ba
AV
536 __le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
537 __le16 xmt_tco_frames, rcv_tco_frames;
538 __le32 complete;
1da177e4
LT
539};
540
541struct mem {
542 struct {
543 u32 signature;
544 u32 result;
545 } selftest;
546 struct stats stats;
547 u8 dump_buf[596];
548};
549
550struct param_range {
551 u32 min;
552 u32 max;
553 u32 count;
554};
555
556struct params {
557 struct param_range rfds;
558 struct param_range cbs;
559};
560
561struct nic {
562 /* Begin: frequently used values: keep adjacent for cache effect */
563 u32 msg_enable ____cacheline_aligned;
564 struct net_device *netdev;
565 struct pci_dev *pdev;
72001762 566 u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);
1da177e4
LT
567
568 struct rx *rxs ____cacheline_aligned;
569 struct rx *rx_to_use;
570 struct rx *rx_to_clean;
571 struct rfd blank_rfd;
ca93ca42 572 enum ru_state ru_running;
1da177e4
LT
573
574 spinlock_t cb_lock ____cacheline_aligned;
575 spinlock_t cmd_lock;
576 struct csr __iomem *csr;
577 enum scb_cmd_lo cuc_cmd;
578 unsigned int cbs_avail;
bea3348e 579 struct napi_struct napi;
1da177e4
LT
580 struct cb *cbs;
581 struct cb *cb_to_use;
582 struct cb *cb_to_send;
583 struct cb *cb_to_clean;
aaf918ba 584 __le16 tx_command;
1da177e4
LT
585 /* End: frequently used values: keep adjacent for cache effect */
586
587 enum {
588 ich = (1 << 0),
589 promiscuous = (1 << 1),
590 multicast_all = (1 << 2),
591 wol_magic = (1 << 3),
592 ich_10h_workaround = (1 << 4),
593 } flags ____cacheline_aligned;
594
595 enum mac mac;
596 enum phy phy;
597 struct params params;
1da177e4
LT
598 struct timer_list watchdog;
599 struct timer_list blink_timer;
600 struct mii_if_info mii;
2acdb1e0 601 struct work_struct tx_timeout_task;
1da177e4
LT
602 enum loopback loopback;
603
604 struct mem *mem;
605 dma_addr_t dma_addr;
606
98468efd 607 struct pci_pool *cbs_pool;
1da177e4
LT
608 dma_addr_t cbs_dma_addr;
609 u8 adaptive_ifs;
610 u8 tx_threshold;
611 u32 tx_frames;
612 u32 tx_collisions;
613 u32 tx_deferred;
614 u32 tx_single_collisions;
615 u32 tx_multiple_collisions;
616 u32 tx_fc_pause;
617 u32 tx_tco_frames;
618
619 u32 rx_fc_pause;
620 u32 rx_fc_unsupported;
621 u32 rx_tco_frames;
622 u32 rx_over_length_errors;
623
1da177e4
LT
624 u16 leds;
625 u16 eeprom_wc;
aaf918ba 626 __le16 eeprom[256];
ac7c6669 627 spinlock_t mdio_lock;
7e15b0c9 628 const struct firmware *fw;
1da177e4
LT
629};
630
/* Force posted PCI writes through any intermediate bridges by doing a
 * benign read of the SCB status register; the read cannot complete
 * until prior writes have reached the device. */
static inline void e100_write_flush(struct nic *nic)
{
	/* Flush previous PCI writes through intermediate bridges
	 * by doing a benign read */
	(void)ioread8(&nic->csr->scb.status);
}
637
/* Unmask the device interrupt line.  cmd_hi is the SCB interrupt-mask
 * byte; all SCB command-register accesses are serialized by cmd_lock. */
static void e100_enable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	/* Write the mask, then flush so the unmask takes effect before
	 * the lock is dropped. */
	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}
647
/* Mask the device interrupt line.  Mirror image of e100_enable_irq();
 * same cmd_lock/flush discipline applies. */
static void e100_disable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}
657
/* Full hardware reset: selective reset first (quiesces CU/RU and gets
 * the device off the PCI bus), then a software reset, then re-mask the
 * interrupt line which the reset leaves unmasked.  The 20us delays
 * after each port write give the device time to complete the reset. */
static void e100_hw_reset(struct nic *nic)
{
	/* Put CU and RU into idle with a selective reset to get
	 * device off of PCI bus */
	iowrite32(selective_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Now fully reset device */
	iowrite32(software_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Mask off our interrupt line - it's unmasked after reset */
	e100_disable_irq(nic);
}
672
/* Run the controller's built-in self-test, which DMAs its results into
 * nic->mem->selftest.  Returns 0 on success, -ETIMEDOUT if the test
 * failed or never completed.
 *
 * NOTE(review): dma_addr is truncated to u32 here; presumably the
 * selftest area is guaranteed to live in 32-bit DMA space — confirm
 * against the probe-time DMA mask. */
static int e100_self_test(struct nic *nic)
{
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	/* Passing the self-test is a pretty good indication
	 * that the device can DMA to/from host memory */

	/* Pre-poison both fields so we can detect "never written". */
	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	iowrite32(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);
	/* Wait 10 msec for self-test to complete */
	msleep(10);

	/* Interrupts are enabled after self-test */
	e100_disable_irq(nic);

	/* Check results of self-test */
	if (nic->mem->selftest.result != 0) {
		DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n",
			nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	/* signature still zero means the device never DMA'ed results back */
	if (nic->mem->selftest.signature == 0) {
		DPRINTK(HW, ERR, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}
704
/* Write one little-endian word to the serial EEPROM at @addr.
 * @addr_len is the device's address width in bits (discovered earlier
 * by e100_eeprom_read()).  The word is written by bit-banging three
 * commands over the EEPROM control register: write-enable, the write
 * itself, then write-disable.  The 4us flush/delay after every edge
 * satisfies the EEPROM's serial timing. */
static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
{
	u32 cmd_addr_data[3];
	u8 ctrl;
	int i, j;

	/* Three cmds: write/erase enable, write data, write/erase disable */
	cmd_addr_data[0] = op_ewen << (addr_len - 2);
	cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
		le16_to_cpu(data);
	cmd_addr_data[2] = op_ewds << (addr_len - 2);

	/* Bit-bang cmds to write word to eeprom */
	for (j = 0; j < 3; j++) {

		/* Chip select */
		iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		/* Clock each of the 32 command/address/data bits out,
		 * MSB first: present the bit, then raise the clock. */
		for (i = 31; i >= 0; i--) {
			ctrl = (cmd_addr_data[j] & (1 << i)) ?
				eecs | eedi : eecs;
			iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
			e100_write_flush(nic); udelay(4);

			iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
			e100_write_flush(nic); udelay(4);
		}
		/* Wait 10 msec for cmd to complete */
		msleep(10);

		/* Chip deselect */
		iowrite8(0, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);
	}
};
741
/* General technique stolen from the eepro100 driver - very clever */
/* Read one word from the serial EEPROM at @addr, returned in
 * little-endian form.  *@addr_len is the assumed address width in
 * bits; it is corrected in place the first time the EEPROM's dummy-
 * zero handshake reveals the true width (hence the pointer). */
static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
{
	u32 cmd_addr_data;
	u16 data = 0;
	u8 ctrl;
	int i;

	cmd_addr_data = ((op_read << *addr_len) | addr) << 16;

	/* Chip select */
	iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
	e100_write_flush(nic); udelay(4);

	/* Bit-bang to read word from eeprom */
	for (i = 31; i >= 0; i--) {
		ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
		iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		/* Eeprom drives a dummy zero to EEDO after receiving
		 * complete address.  Use this to adjust addr_len. */
		ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
		if (!(ctrl & eedo) && i > 16) {
			*addr_len -= (i - 16);
			i = 17;
		}

		/* Shift the incoming data bit into the result word. */
		data = (data << 1) | (ctrl & eedo ? 1 : 0);
	}

	/* Chip deselect */
	iowrite8(0, &nic->csr->eeprom_ctrl_lo);
	e100_write_flush(nic); udelay(4);

	return cpu_to_le16(data);
};
782
783/* Load entire EEPROM image into driver cache and validate checksum */
784static int e100_eeprom_load(struct nic *nic)
785{
786 u16 addr, addr_len = 8, checksum = 0;
787
788 /* Try reading with an 8-bit addr len to discover actual addr len */
789 e100_eeprom_read(nic, &addr_len, 0);
790 nic->eeprom_wc = 1 << addr_len;
791
f26251eb 792 for (addr = 0; addr < nic->eeprom_wc; addr++) {
1da177e4 793 nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
f26251eb 794 if (addr < nic->eeprom_wc - 1)
aaf918ba 795 checksum += le16_to_cpu(nic->eeprom[addr]);
1da177e4
LT
796 }
797
798 /* The checksum, stored in the last word, is calculated such that
799 * the sum of words should be 0xBABA */
aaf918ba 800 if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
1da177e4 801 DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
8fb6f732
DM
802 if (!eeprom_bad_csum_allow)
803 return -EAGAIN;
1da177e4
LT
804 }
805
806 return 0;
807}
808
809/* Save (portion of) driver EEPROM cache to device and update checksum */
810static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
811{
812 u16 addr, addr_len = 8, checksum = 0;
813
814 /* Try reading with an 8-bit addr len to discover actual addr len */
815 e100_eeprom_read(nic, &addr_len, 0);
816 nic->eeprom_wc = 1 << addr_len;
817
f26251eb 818 if (start + count >= nic->eeprom_wc)
1da177e4
LT
819 return -EINVAL;
820
f26251eb 821 for (addr = start; addr < start + count; addr++)
1da177e4
LT
822 e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);
823
824 /* The checksum, stored in the last word, is calculated such that
825 * the sum of words should be 0xBABA */
f26251eb 826 for (addr = 0; addr < nic->eeprom_wc - 1; addr++)
aaf918ba
AV
827 checksum += le16_to_cpu(nic->eeprom[addr]);
828 nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
1da177e4
LT
829 e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
830 nic->eeprom[nic->eeprom_wc - 1]);
831
832 return 0;
833}
834
962082b6 835#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
e6280f26 836#define E100_WAIT_SCB_FAST 20 /* delay like the old code */
/* Issue one command to the SCB command unit.  Waits (bounded) for the
 * previous command to be accepted, then writes the general pointer
 * (skipped for cuc_resume, which doesn't use it) and the command byte.
 * Returns 0 on success, -EAGAIN if the SCB never cleared.  Safe to
 * call from any context; protected by cmd_lock. */
static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
	unsigned long flags;
	unsigned int i;
	int err = 0;

	spin_lock_irqsave(&nic->cmd_lock, flags);

	/* Previous command is accepted when SCB clears */
	for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if (likely(!ioread8(&nic->csr->scb.cmd_lo)))
			break;
		cpu_relax();
		/* Spin fast for the first few iterations, then back off
		 * with 5us delays to avoid hammering the bus. */
		if (unlikely(i > E100_WAIT_SCB_FAST))
			udelay(5);
	}
	if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
		err = -EAGAIN;
		goto err_unlock;
	}

	if (unlikely(cmd != cuc_resume))
		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
	iowrite8(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
	spin_unlock_irqrestore(&nic->cmd_lock, flags);

	return err;
}
867
/* Queue a command block on the CBL ring and kick the command unit.
 * @cb_prepare fills in the command-specific fields of the claimed CB.
 * Returns 0 on success, -ENOMEM when no CB is free, -ENOSPC when this
 * CB was the last free one (command is still queued).  Protected by
 * cb_lock. */
static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	struct cb *cb;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&nic->cb_lock, flags);

	if (unlikely(!nic->cbs_avail)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	/* Claim the next free CB from the ring. */
	cb = nic->cb_to_use;
	nic->cb_to_use = cb->next;
	nic->cbs_avail--;
	cb->skb = skb;

	/* Ring is now full; report -ENOSPC but still queue the command. */
	if (unlikely(!nic->cbs_avail))
		err = -ENOSPC;

	cb_prepare(nic, cb, skb);

	/* Order is important otherwise we'll be in a race with h/w:
	 * set S-bit in current first, then clear S-bit in previous. */
	cb->command |= cpu_to_le16(cb_s);
	wmb();
	cb->prev->command &= cpu_to_le16(~cb_s);

	/* Push all pending CBs to the device; after the first start,
	 * subsequent kicks use the cheaper cuc_resume. */
	while (nic->cb_to_send != nic->cb_to_use) {
		if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
			nic->cb_to_send->dma_addr))) {
			/* Ok, here's where things get sticky.  It's
			 * possible that we can't schedule the command
			 * because the controller is too busy, so
			 * let's just queue the command and try again
			 * when another command is scheduled. */
			if (err == -ENOSPC) {
				//request a reset
				schedule_work(&nic->tx_timeout_task);
			}
			break;
		} else {
			nic->cuc_cmd = cuc_resume;
			nic->cb_to_send = nic->cb_to_send->next;
		}
	}

err_unlock:
	spin_unlock_irqrestore(&nic->cb_lock, flags);

	return err;
}
922
72001762
AM
923static int mdio_read(struct net_device *netdev, int addr, int reg)
924{
925 struct nic *nic = netdev_priv(netdev);
926 return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0);
927}
928
929static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
930{
931 struct nic *nic = netdev_priv(netdev);
932
933 nic->mdio_ctrl(nic, addr, mdi_write, reg, data);
934}
935
936/* the standard mdio_ctrl() function for usual MII-compliant hardware */
937static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
1da177e4
LT
938{
939 u32 data_out = 0;
940 unsigned int i;
ac7c6669 941 unsigned long flags;
1da177e4 942
ac7c6669
OM
943
944 /*
945 * Stratus87247: we shouldn't be writing the MDI control
946 * register until the Ready bit shows True. Also, since
947 * manipulation of the MDI control registers is a multi-step
948 * procedure it should be done under lock.
949 */
950 spin_lock_irqsave(&nic->mdio_lock, flags);
951 for (i = 100; i; --i) {
27345bb6 952 if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
ac7c6669
OM
953 break;
954 udelay(20);
955 }
956 if (unlikely(!i)) {
957 printk("e100.mdio_ctrl(%s) won't go Ready\n",
958 nic->netdev->name );
959 spin_unlock_irqrestore(&nic->mdio_lock, flags);
960 return 0; /* No way to indicate timeout error */
961 }
27345bb6 962 iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);
1da177e4 963
ac7c6669 964 for (i = 0; i < 100; i++) {
1da177e4 965 udelay(20);
27345bb6 966 if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
1da177e4
LT
967 break;
968 }
ac7c6669 969 spin_unlock_irqrestore(&nic->mdio_lock, flags);
1da177e4
LT
970 DPRINTK(HW, DEBUG,
971 "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
972 dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out);
973 return (u16)data_out;
974}
975
72001762
AM
976/* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */
977static u16 mdio_ctrl_phy_82552_v(struct nic *nic,
978 u32 addr,
979 u32 dir,
980 u32 reg,
981 u16 data)
982{
983 if ((reg == MII_BMCR) && (dir == mdi_write)) {
984 if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) {
985 u16 advert = mdio_read(nic->netdev, nic->mii.phy_id,
986 MII_ADVERTISE);
987
988 /*
989 * Workaround Si issue where sometimes the part will not
990 * autoneg to 100Mbps even when advertised.
991 */
992 if (advert & ADVERTISE_100FULL)
993 data |= BMCR_SPEED100 | BMCR_FULLDPLX;
994 else if (advert & ADVERTISE_100HALF)
995 data |= BMCR_SPEED100;
996 }
997 }
998 return mdio_ctrl_hw(nic, addr, dir, reg, data);
1da177e4
LT
999}
1000
72001762
AM
1001/* Fully software-emulated mdio_ctrl() function for cards without
1002 * MII-compliant PHYs.
1003 * For now, this is mainly geared towards 80c24 support; in case of further
1004 * requirements for other types (i82503, ...?) either extend this mechanism
1005 * or split it, whichever is cleaner.
1006 */
1007static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
1008 u32 addr,
1009 u32 dir,
1010 u32 reg,
1011 u16 data)
1012{
1013 /* might need to allocate a netdev_priv'ed register array eventually
1014 * to be able to record state changes, but for now
1015 * some fully hardcoded register handling ought to be ok I guess. */
1016
1017 if (dir == mdi_read) {
1018 switch (reg) {
1019 case MII_BMCR:
1020 /* Auto-negotiation, right? */
1021 return BMCR_ANENABLE |
1022 BMCR_FULLDPLX;
1023 case MII_BMSR:
1024 return BMSR_LSTATUS /* for mii_link_ok() */ |
1025 BMSR_ANEGCAPABLE |
1026 BMSR_10FULL;
1027 case MII_ADVERTISE:
1028 /* 80c24 is a "combo card" PHY, right? */
1029 return ADVERTISE_10HALF |
1030 ADVERTISE_10FULL;
1031 default:
1032 DPRINTK(HW, DEBUG,
1033 "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
1034 dir == mdi_read ? "READ" : "WRITE", addr, reg, data);
1035 return 0xFFFF;
1036 }
1037 } else {
1038 switch (reg) {
1039 default:
1040 DPRINTK(HW, DEBUG,
1041 "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
1042 dir == mdi_read ? "READ" : "WRITE", addr, reg, data);
1043 return 0xFFFF;
1044 }
b55de80e 1045 }
72001762
AM
1046}
1047static inline int e100_phy_supports_mii(struct nic *nic)
1048{
1049 /* for now, just check it by comparing whether we
1050 are using MII software emulation.
1051 */
1052 return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated);
1da177e4
LT
1053}
1054
/* Populate nic with power-on defaults: MAC type (from PCI rev ID),
 * Rx/Tx ring parameter ranges, Tx threshold and command template, the
 * blank RFD template, and the generic MII accessor callbacks. */
static void e100_get_defaults(struct nic *nic)
{
	/* allowable ranges and initial counts for the Rx (RFD) and
	 * Tx (CB) rings */
	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
	struct param_range cbs = { .min = 64, .max = 256, .count = 128 };

	/* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
	if (nic->mac == mac_unknown)
		nic->mac = mac_82557_D100_A;

	nic->params.rfds = rfds;
	nic->params.cbs = cbs;

	/* Quadwords to DMA into FIFO before starting frame transmit */
	nic->tx_threshold = 0xE0;

	/* no interrupt for every tx completion, delay = 256us if not 557 */
	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));

	/* Template for a freshly allocated RFD */
	nic->blank_rfd.command = 0;
	nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);	/* no RBD chained */
	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);

	/* MII setup */
	nic->mii.phy_id_mask = 0x1F;
	nic->mii.reg_num_mask = 0x1F;
	nic->mii.dev = nic->netdev;
	nic->mii.mdio_read = mdio_read;
	nic->mii.mdio_write = mdio_write;
}
1087
/* cb setup routine run via e100_exec_cb(): fill in the 8255x CONFIGURE
 * command block. Base settings are adjusted for MII support, forced
 * duplex, promiscuous/loopback mode, multicast-all, Wake-on-LAN state
 * and MAC generation (D101+/D101M+/D102+ feature bits). The skb
 * argument is unused here. */
static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;	/* byte view, for the hex dump below */

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	config->byte_count = 0x16;		/* bytes in this struct */
	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1;		/* reserved */
	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=pass */
	config->tx_underrun_retry = 0x3;	/* # of underrun retries */
	if (e100_phy_supports_mii(nic))
		config->mii_mode = 1;		/* 1=MII mode, 0=i82503 mode */
	config->pad10 = 0x6;
	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
	config->preamble_length = 0x2;		/* 0=1, 1=3, 2=7, 3=15 bytes */
	config->ifs = 0x6;			/* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
	config->tx_padding = 0x1;		/* 1=pad short frames */
	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
	config->pad18 = 0x1;
	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if (nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */

	if (nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
	}

	if (nic->flags & multicast_all)
		config->multicast_all = 0x1;		/* 1=accept, 0=no */

	/* disable WoL when up */
	if (netif_running(nic->netdev) || !(nic->flags & wol_magic))
		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */

	if (nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
		if (nic->mac >= mac_82559_D101M) {
			config->tno_intr = 0x1;		/* TCO stats enable */
			/* Enable TCO in extended config */
			if (nic->mac >= mac_82551_10) {
				config->byte_count = 0x20; /* extended bytes */
				config->rx_d102_mode = 0x1; /* GMRC for TCO */
			}
		} else {
			config->standard_stat_counter = 0x0;
		}
	}

	/* dump the first 24 config bytes for debugging */
	DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
	DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
	DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}
1166
2afecc04
JB
1167/*************************************************************************
1168* CPUSaver parameters
1169*
1170* All CPUSaver parameters are 16-bit literals that are part of a
1171* "move immediate value" instruction. By changing the value of
1172* the literal in the instruction before the code is loaded, the
1173* driver can change the algorithm.
1174*
0779bf2d 1175* INTDELAY - This loads the dead-man timer with its initial value.
05479938 1176* When this timer expires the interrupt is asserted, and the
2afecc04
JB
1177* timer is reset each time a new packet is received. (see
1178* BUNDLEMAX below to set the limit on number of chained packets)
1179* The current default is 0x600 or 1536. Experiments show that
1180* the value should probably stay within the 0x200 - 0x1000.
1181*
05479938 1182* BUNDLEMAX -
2afecc04
JB
1183* This sets the maximum number of frames that will be bundled. In
1184* some situations, such as the TCP windowing algorithm, it may be
1185* better to limit the growth of the bundle size than let it go as
1186* high as it can, because that could cause too much added latency.
1187* The default is six, because this is the number of packets in the
1188* default TCP window size. A value of 1 would make CPUSaver indicate
1189* an interrupt for every frame received. If you do not want to put
* a limit on the bundle size, set this value to 0xFFFF.
1191*
05479938 1192* BUNDLESMALL -
2afecc04
JB
1193* This contains a bit-mask describing the minimum size frame that
1194* will be bundled. The default masks the lower 7 bits, which means
1195* that any frame less than 128 bytes in length will not be bundled,
1196* but will instead immediately generate an interrupt. This does
1197* not affect the current bundle in any way. Any frame that is 128
1198* bytes or large will be bundled normally. This feature is meant
1199* to provide immediate indication of ACK frames in a TCP environment.
1200* Customers were seeing poor performance when a machine with CPUSaver
1201* enabled was sending but not receiving. The delay introduced when
1202* the ACKs were received was enough to reduce total throughput, because
1203* the sender would sit idle until the ACK was finally seen.
1204*
1205* The current default is 0xFF80, which masks out the lower 7 bits.
* This means that any frame which is 0x7F (127) bytes or smaller
05479938 1207* will cause an immediate interrupt. Because this value must be a
2afecc04
JB
1208* bit mask, there are only a few valid values that can be used. To
1209* turn this feature off, the driver can write the value xFFFF to the
1210* lower word of this instruction (in the same way that the other
1211* parameters are used). Likewise, a value of 0xF800 (2047) would
1212* cause an interrupt to be generated for every frame, because all
1213* standard Ethernet frames are <= 2047 bytes in length.
1214*************************************************************************/
1215
05479938 1216/* if you wish to disable the ucode functionality, while maintaining the
2afecc04
JB
1217 * workarounds it provides, set the following defines to:
1218 * BUNDLESMALL 0
1219 * BUNDLEMAX 1
1220 * INTDELAY 1
1221 */
1222#define BUNDLESMALL 1
1223#define BUNDLEMAX (u16)6
1224#define INTDELAY (u16)1536 /* 0x600 */
1225
9ac32e1b
JSR
/* Initialize firmware: locate, load and validate the CPUSaver ucode
 * for this MAC revision.
 * Returns NULL when no ucode is needed (ICH parts, or MACs with no
 * ucode image), an ERR_PTR() on load/validation failure, or a pointer
 * to the validated blob (also cached in nic->fw for reuse on resume). */
static const struct firmware *e100_request_firmware(struct nic *nic)
{
	const char *fw_name;
	const struct firmware *fw = nic->fw;
	u8 timer, bundle, min_size;
	int err = 0;

	/* do not load u-code for ICH devices */
	if (nic->flags & ich)
		return NULL;

	/* Search for ucode match against h/w revision */
	if (nic->mac == mac_82559_D101M)
		fw_name = FIRMWARE_D101M;
	else if (nic->mac == mac_82559_D101S)
		fw_name = FIRMWARE_D101S;
	else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10)
		fw_name = FIRMWARE_D102E;
	else /* No ucode on other devices */
		return NULL;

	/* If the firmware has not previously been loaded, request a pointer
	 * to it. If it was previously loaded, we are reinitializing the
	 * adapter, possibly in a resume from hibernate, in which case
	 * request_firmware() cannot be used.
	 */
	if (!fw)
		err = request_firmware(&fw, fw_name, &nic->pdev->dev);

	if (err) {
		DPRINTK(PROBE, ERR, "Failed to load firmware \"%s\": %d\n",
			fw_name, err);
		return ERR_PTR(err);
	}

	/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
	   indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
	if (fw->size != UCODE_SIZE * 4 + 3) {
		DPRINTK(PROBE, ERR, "Firmware \"%s\" has wrong size %zu\n",
			fw_name, fw->size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* Read timer, bundle and min_size from end of firmware blob;
	 * these are word offsets into the ucode image and must be
	 * in range before e100_setup_ucode() indexes with them */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
	    min_size >= UCODE_SIZE) {
		DPRINTK(PROBE, ERR,
			"\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
			fw_name, timer, bundle, min_size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* OK, firmware is validated and ready to use. Save a pointer
	 * to it in the nic */
	nic->fw = fw;
	return fw;
}
1290
9ac32e1b
JSR
/* cb setup routine run via e100_exec_cb(): copy the validated ucode
 * into the command block and patch in the user-tunable CPUSaver
 * parameters (INTDELAY, BUNDLEMAX, BUNDLESMALL) at the word offsets
 * stored in the last three bytes of the firmware blob. */
static void e100_setup_ucode(struct nic *nic, struct cb *cb,
			     struct sk_buff *skb)
{
	const struct firmware *fw = (void *)skb;
	u8 timer, bundle, min_size;

	/* It's not a real skb; we just abused the fact that e100_exec_cb
	   will pass it through to here... */
	cb->skb = NULL;

	/* firmware is stored as little endian already */
	memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);

	/* Read timer, bundle and min_size from end of firmware blob
	 * (already range-checked by e100_request_firmware()) */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	/* Insert user-tunable settings in cb->u.ucode: each parameter
	 * lives in the low 16 bits of its instruction word */
	cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
	cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
	cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);

	cb->command = cpu_to_le16(cb_ucode | cb_el);
}
1319
/* Load the CPUSaver ucode (if any) and synchronously wait for the
 * command to complete. Returns 0 on success or when no ucode is
 * required, -EPERM on timeout/failure, or a request/exec error code. */
static inline int e100_load_ucode_wait(struct nic *nic)
{
	const struct firmware *fw;
	int err = 0, counter = 50;
	struct cb *cb = nic->cb_to_clean;

	fw = e100_request_firmware(nic);
	/* If it's NULL, then no ucode is required
	 * (PTR_ERR(NULL) == 0, so the NULL case returns success) */
	if (!fw || IS_ERR(fw))
		return PTR_ERR(fw);

	if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
		DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err);

	/* must restart cuc */
	nic->cuc_cmd = cuc_start;

	/* wait for completion */
	e100_write_flush(nic);
	udelay(10);

	/* wait for possibly (ouch) 500ms */
	while (!(cb->status & cpu_to_le16(cb_complete))) {
		msleep(10);
		if (!--counter) break;
	}

	/* ack any interrupts, something could have been set */
	iowrite8(~0, &nic->csr->scb.stat_ack);

	/* if the command failed, or is not OK, notify and return */
	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
		DPRINTK(PROBE,ERR, "ucode load failed\n");
		err = -EPERM;
	}

	return err;
}
1358
1359static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
1360 struct sk_buff *skb)
1361{
1362 cb->command = cpu_to_le16(cb_iaaddr);
1363 memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
1364}
1365
1366static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1367{
1368 cb->command = cpu_to_le16(cb_dump);
1369 cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
1370 offsetof(struct mem, dump_buf));
1371}
1372
72001762
AM
1373static int e100_phy_check_without_mii(struct nic *nic)
1374{
1375 u8 phy_type;
1376 int without_mii;
1377
1378 phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;
1379
1380 switch (phy_type) {
1381 case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
1382 case I82503: /* Non-MII PHY; UNTESTED! */
1383 case S80C24: /* Non-MII PHY; tested and working */
1384 /* paragraph from the FreeBSD driver, "FXP_PHY_80C24":
1385 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
1386 * doesn't have a programming interface of any sort. The
1387 * media is sensed automatically based on how the link partner
1388 * is configured. This is, in essence, manual configuration.
1389 */
1390 DPRINTK(PROBE, INFO,
1391 "found MII-less i82503 or 80c24 or other PHY\n");
1392
1393 nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
1394 nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */
1395
1396 /* these might be needed for certain MII-less cards...
1397 * nic->flags |= ich;
1398 * nic->flags |= ich_10h_workaround; */
1399
1400 without_mii = 1;
1401 break;
1402 default:
1403 without_mii = 0;
1404 break;
1405 }
1406 return without_mii;
1407}
1408
1da177e4
LT
1409#define NCONFIG_AUTO_SWITCH 0x0080
1410#define MII_NSC_CONG MII_RESV1
1411#define NSC_CONG_ENABLE 0x0100
1412#define NSC_CONG_TXREADY 0x0400
1413#define ADVERTISE_FC_SUPPORTED 0x0400
/* Discover and initialize the PHY: probe all 32 MDIO addresses, read
 * the PHY ID, isolate non-selected PHYs, and apply model-specific
 * workarounds (National tx phys, 82552, MDI/MDI-X auto-switch).
 * Returns 0 on success, -EAGAIN when no usable PHY is found. */
static int e100_phy_init(struct nic *nic)
{
	struct net_device *netdev = nic->netdev;
	u32 addr;
	u16 bmcr, stat, id_lo, id_hi, cong;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for (addr = 0; addr < 32; addr++) {
		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		/* BMSR is read twice: latched link-status bits need a
		 * second read to reflect current state */
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	if (addr == 32) {
		/* uhoh, no PHY detected: check whether we seem to be some
		 * weird, rare variant which is *known* to not have any MII.
		 * But do this AFTER MII checking only, since this does
		 * lookup of EEPROM values which may easily be unreliable. */
		if (e100_phy_check_without_mii(nic))
			return 0; /* simply return and hope for the best */
		else {
			/* for unknown cases log a fatal error */
			DPRINTK(HW, ERR,
				"Failed to locate any known PHY, aborting.\n");
			return -EAGAIN;
		}
	} else
		DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);

	/* Get phy ID */
	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
	DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);

	/* Select the phy and isolate the rest */
	for (addr = 0; addr < 32; addr++) {
		if (addr != nic->mii.phy_id) {
			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
		} else if (nic->phy != phy_82552_v) {
			bmcr = mdio_read(netdev, addr, MII_BMCR);
			mdio_write(netdev, addr, MII_BMCR,
				bmcr & ~BMCR_ISOLATE);
		}
	}
	/*
	 * Workaround for 82552:
	 * Clear the ISOLATE bit on selected phy_id last (mirrored on all
	 * other phy_id's) using bmcr value from addr discovery loop above.
	 */
	if (nic->phy == phy_82552_v)
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR,
			bmcr & ~BMCR_ISOLATE);

	/* Handle National tx phys */
#define NCS_PHY_MODEL_MASK	0xFFF0FFFF
	if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
		/* Disable congestion control */
		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
		cong |= NSC_CONG_TXREADY;
		cong &= ~NSC_CONG_ENABLE;
		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
	}

	if (nic->phy == phy_82552_v) {
		u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);

		/* assign special tweaked mdio_ctrl() function */
		nic->mdio_ctrl = mdio_ctrl_phy_82552_v;

		/* Workaround Si not advertising flow-control during autoneg */
		advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);

		/* Reset for the above changes to take effect */
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		bmcr |= BMCR_RESET;
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
	} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
	   !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
		/* enable/disable MDI/MDI-X auto-switching. */
		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
				nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
	}

	return 0;
}
1504
/* Full hardware bring-up sequence: reset, self-test, PHY init, CU/RU
 * base loads, ucode load, configure, station address, and stats dump
 * setup. Order matters; the first failing step aborts with its error
 * code. IRQs are left disabled on success. */
static int e100_hw_init(struct nic *nic)
{
	int err;

	e100_hw_reset(nic);

	DPRINTK(HW, ERR, "e100_hw_init\n");
	/* self-test sleeps, so skip it when called from atomic context */
	if (!in_interrupt() && (err = e100_self_test(nic)))
		return err;

	if ((err = e100_phy_init(nic)))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_load_base, 0)))
		return err;
	if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
		return err;
	if ((err = e100_load_ucode_wait(nic)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_configure)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_dump_addr,
		nic->dma_addr + offsetof(struct mem, stats))))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
		return err;

	e100_disable_irq(nic);

	return 0;
}
1537
/* cb setup routine run via e100_exec_cb(): build a multicast-setup
 * command listing up to E100_MAX_MULTICAST_ADDRS addresses from the
 * netdev's multicast list; extra addresses are silently dropped
 * (the multicast_all flag covers that case). skb is unused. */
static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct net_device *netdev = nic->netdev;
	struct dev_mc_list *list;
	u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);

	cb->command = cpu_to_le16(cb_multi);
	cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
	i = 0;
	netdev_for_each_mc_addr(list, netdev) {
		if (i == count)
			break;
		memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &list->dmi_addr,
			ETH_ALEN);
	}
}
1554
1555static void e100_set_multicast_list(struct net_device *netdev)
1556{
1557 struct nic *nic = netdev_priv(netdev);
1558
1559 DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
4cd24eaf 1560 netdev_mc_count(netdev), netdev->flags);
1da177e4 1561
f26251eb 1562 if (netdev->flags & IFF_PROMISC)
1da177e4
LT
1563 nic->flags |= promiscuous;
1564 else
1565 nic->flags &= ~promiscuous;
1566
f26251eb 1567 if (netdev->flags & IFF_ALLMULTI ||
4cd24eaf 1568 netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS)
1da177e4
LT
1569 nic->flags |= multicast_all;
1570 else
1571 nic->flags &= ~multicast_all;
1572
1573 e100_exec_cb(nic, NULL, e100_configure);
1574 e100_exec_cb(nic, NULL, e100_multi);
1575}
1576
/* Harvest the device's DMA'd statistics block into netdev stats and
 * the nic's private counters, then kick off the next cuc_dump_reset.
 * The stats area layout grows with MAC generation, so completion is
 * detected via a generation-dependent "complete" word. */
static void e100_update_stats(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct net_device_stats *ns = &dev->stats;
	struct stats *s = &nic->mem->stats;
	/* the completion marker sits right after the last counter this
	 * MAC generation actually reports */
	__le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
		(nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
		&s->complete;

	/* Device's stats reporting may take several microseconds to
	 * complete, so we're always waiting for results of the
	 * previous command. */

	if (*complete == cpu_to_le32(cuc_dump_reset_complete)) {
		*complete = 0;
		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
		ns->collisions += nic->tx_collisions;
		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
			le32_to_cpu(s->tx_lost_crs);
		ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
			nic->rx_over_length_errors;
		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
			le32_to_cpu(s->rx_alignment_errors) +
			le32_to_cpu(s->rx_short_frame_errors) +
			le32_to_cpu(s->rx_cdt_errors);
		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
		nic->tx_single_collisions +=
			le32_to_cpu(s->tx_single_collisions);
		nic->tx_multiple_collisions +=
			le32_to_cpu(s->tx_multiple_collisions);
		/* flow-control counters exist from D101 A4 on,
		 * TCO counters from D101M on */
		if (nic->mac >= mac_82558_D101_A4) {
			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
			nic->rx_fc_unsupported +=
				le32_to_cpu(s->fc_rcv_unsupported);
			if (nic->mac >= mac_82559_D101M) {
				nic->tx_tco_frames +=
					le16_to_cpu(s->xmt_tco_frames);
				nic->rx_tco_frames +=
					le16_to_cpu(s->rcv_tco_frames);
			}
		}
	}


	if (e100_exec_cmd(nic, cuc_dump_reset, 0))
		DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
}
1635
1636static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
1637{
1638 /* Adjust inter-frame-spacing (IFS) between two transmits if
1639 * we're getting collisions on a half-duplex connection. */
1640
f26251eb 1641 if (duplex == DUPLEX_HALF) {
1da177e4
LT
1642 u32 prev = nic->adaptive_ifs;
1643 u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
1644
f26251eb 1645 if ((nic->tx_frames / 32 < nic->tx_collisions) &&
1da177e4 1646 (nic->tx_frames > min_frames)) {
f26251eb 1647 if (nic->adaptive_ifs < 60)
1da177e4
LT
1648 nic->adaptive_ifs += 5;
1649 } else if (nic->tx_frames < min_frames) {
f26251eb 1650 if (nic->adaptive_ifs >= 5)
1da177e4
LT
1651 nic->adaptive_ifs -= 5;
1652 }
f26251eb 1653 if (nic->adaptive_ifs != prev)
1da177e4
LT
1654 e100_exec_cb(nic, NULL, e100_configure);
1655 }
1656}
1657
/* Periodic timer callback: report link transitions, fire a software
 * interrupt to recover from rare Rx allocation failures, refresh
 * stats, tune adaptive IFS, and apply per-generation workarounds.
 * Re-arms itself every E100_WATCHDOG_PERIOD. */
static void e100_watchdog(unsigned long data)
{
	struct nic *nic = (struct nic *)data;
	struct ethtool_cmd cmd;

	DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies);

	/* mii library handles link maintenance tasks */

	mii_ethtool_gset(&nic->mii, &cmd);

	if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
		printk(KERN_INFO "e100: %s NIC Link is Up %s Mbps %s Duplex\n",
		       nic->netdev->name,
		       cmd.speed == SPEED_100 ? "100" : "10",
		       cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
	} else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
		printk(KERN_INFO "e100: %s NIC Link is Down\n",
		       nic->netdev->name);
	}

	mii_check_link(&nic->mii);

	/* Software generated interrupt to recover from (rare) Rx
	 * allocation failure.
	 * Unfortunately have to use a spinlock to not re-enable interrupts
	 * accidentally, due to hardware that shares a register between the
	 * interrupt mask bit and the SW Interrupt generation bit */
	spin_lock_irq(&nic->cmd_lock);
	iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irq(&nic->cmd_lock);

	e100_update_stats(nic);
	e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex);

	if (nic->mac <= mac_82557_D100_C)
		/* Issue a multicast command to workaround a 557 lock up */
		e100_set_multicast_list(nic->netdev);

	if (nic->flags & ich && cmd.speed==SPEED_10 && cmd.duplex==DUPLEX_HALF)
		/* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
		nic->flags |= ich_10h_workaround;
	else
		nic->flags &= ~ich_10h_workaround;

	mod_timer(&nic->watchdog,
		  round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
}
1707
/* cb setup routine run via e100_exec_cb(): build a Tx command block
 * for skb, DMA-mapping its data as a single TBD. An interrupt is
 * requested every 16th packet so completed CBs get reclaimed even
 * when per-packet interrupts are suppressed. */
static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = nic->tx_command;
	/* interrupt every 16 packets regardless of delay */
	if ((nic->cbs_avail & ~15) == nic->cbs_avail)
		cb->command |= cpu_to_le16(cb_i);
	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
	cb->u.tcb.tcb_byte_count = 0;
	cb->u.tcb.threshold = nic->tx_threshold;
	cb->u.tcb.tbd_count = 1;
	cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
		skb->data, skb->len, PCI_DMA_TODEVICE));
	/* NOTE(review): pci_map_single() result is not checked with
	 * pci_dma_mapping_error(); a failed mapping would hand a bad
	 * address to hardware. Fixing it needs an error path through
	 * the void cb-setup interface — check for mapping failure? */
	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
}
1724
3b29a56d
SH
/* net_device hard_start_xmit hook: queue skb for transmit via a Tx CB.
 * Applies the ICH 10Mbps/half-duplex NOP workaround when flagged.
 * Returns NETDEV_TX_OK when the skb was consumed (including -ENOSPC,
 * where it was queued but the ring is now full), or NETDEV_TX_BUSY on
 * -ENOMEM so the stack retries the skb later. */
static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
				   struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	if (nic->flags & ich_10h_workaround) {
		/* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
		   Issue a NOP command followed by a 1us delay before
		   issuing the Tx command. */
		if (e100_exec_cmd(nic, cuc_nop, 0))
			DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
		udelay(1);
	}

	err = e100_exec_cb(nic, skb, e100_xmit_prepare);

	switch (err) {
	case -ENOSPC:
		/* We queued the skb, but now we're out of space. */
		DPRINTK(TX_ERR, DEBUG, "No space for CB\n");
		netif_stop_queue(netdev);
		break;
	case -ENOMEM:
		/* This is a hard error - log it. */
		DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n");
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	netdev->trans_start = jiffies;
	return NETDEV_TX_OK;
}
1758
/* Reclaim completed Tx command blocks: update stats, unmap DMA, free
 * skbs and return CBs to the available pool under cb_lock. Wakes the
 * Tx queue if it was stopped for lack of CBs. Returns non-zero when
 * at least one CB was cleaned. */
static int e100_tx_clean(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct cb *cb;
	int tx_cleaned = 0;

	spin_lock(&nic->cb_lock);

	/* Clean CBs marked complete */
	for (cb = nic->cb_to_clean;
	    cb->status & cpu_to_le16(cb_complete);
	    cb = nic->cb_to_clean = cb->next) {
		DPRINTK(TX_DONE, DEBUG, "cb[%d]->status = 0x%04X\n",
			(int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
			cb->status);

		/* non-Tx commands (configure, multi, ...) have no skb */
		if (likely(cb->skb != NULL)) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += cb->skb->len;

			pci_unmap_single(nic->pdev,
				le32_to_cpu(cb->u.tcb.tbd.buf_addr),
				le16_to_cpu(cb->u.tcb.tbd.size),
				PCI_DMA_TODEVICE);
			dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			tx_cleaned = 1;
		}
		cb->status = 0;
		nic->cbs_avail++;
	}

	spin_unlock(&nic->cb_lock);

	/* Recover from running out of Tx resources in xmit_frame */
	if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
		netif_wake_queue(nic->netdev);

	return tx_cleaned;
}
1799
/* Release every outstanding command block (unmapping and freeing any
 * attached Tx skb), return the CB ring to the DMA pool, and reset the
 * ring bookkeeping so a subsequent e100_alloc_cbs() starts clean. */
static void e100_clean_cbs(struct nic *nic)
{
	if (nic->cbs) {
		/* cbs_avail == count means every CB has been reclaimed */
		while (nic->cbs_avail != nic->params.cbs.count) {
			struct cb *cb = nic->cb_to_clean;
			if (cb->skb) {
				pci_unmap_single(nic->pdev,
					le32_to_cpu(cb->u.tcb.tbd.buf_addr),
					le16_to_cpu(cb->u.tcb.tbd.size),
					PCI_DMA_TODEVICE);
				dev_kfree_skb(cb->skb);
			}
			nic->cb_to_clean = nic->cb_to_clean->next;
			nic->cbs_avail++;
		}
		pci_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
		nic->cbs = NULL;
		nic->cbs_avail = 0;
	}
	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
		nic->cbs;
}
1823
/* Allocate and initialize the circular command block ring from the
 * DMA pool.  Each CB is doubly linked for the driver (next/prev) and
 * carries the bus address of its successor (link) for the hardware.
 * Returns 0 or -ENOMEM. */
static int e100_alloc_cbs(struct nic *nic)
{
	struct cb *cb;
	unsigned int i, count = nic->params.cbs.count;

	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
	nic->cbs_avail = 0;

	nic->cbs = pci_pool_alloc(nic->cbs_pool, GFP_KERNEL,
				  &nic->cbs_dma_addr);
	if (!nic->cbs)
		return -ENOMEM;
	/* pool memory is not zeroed; clear stale status/command bits */
	memset(nic->cbs, 0, count * sizeof(struct cb));

	for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
		cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
		cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;

		cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
		/* hardware-visible link wraps to the first CB */
		cb->link = cpu_to_le32(nic->cbs_dma_addr +
			((i+1) % count) * sizeof(struct cb));
	}

	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
	nic->cbs_avail = count;

	return 0;
}
1853
/* (Re)start the receive unit, but only if it is currently suspended
 * and the given RFD slot has a buffer.  Called with rx == NULL at
 * init time to start from the head of the RFD list. */
static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
{
	if (!nic->rxs) return;
	if (RU_SUSPENDED != nic->ru_running) return;

	/* handle init time starts */
	if (!rx) rx = nic->rxs;

	/* (Re)start RU if suspended or idle and RFA is non-NULL */
	if (rx->skb) {
		e100_exec_cmd(nic, ruc_start, rx->dma_addr);
		nic->ru_running = RU_RUNNING;
	}
}
1868
1869#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
/* Allocate an skb for one RFD slot, seed it with the blank RFD
 * template, map it for DMA, and chain it onto the end of the RFA by
 * patching the previous RFD's link pointer.  Returns -ENOMEM on
 * allocation or mapping failure, leaving the slot empty for the
 * watchdog to retry. */
static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
{
	if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
		return -ENOMEM;

	/* Init, and map the RFD. */
	skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
		dev_kfree_skb_any(rx->skb);
		rx->skb = NULL;
		rx->dma_addr = 0;
		return -ENOMEM;
	}

	/* Link the RFD to end of RFA by linking previous RFD to
	 * this one.  We are safe to touch the previous RFD because
	 * it is protected by the before last buffer's el bit being set */
	if (rx->prev->skb) {
		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
		/* link field may be unaligned within the skb data */
		put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
		pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
			sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	}

	return 0;
}
1899
/* Examine one RFD: if the hardware has completed it, unmap the
 * buffer, trim it to the received length, and hand it to the stack;
 * otherwise resync it for the device and return -ENODATA.  Returns
 * -EAGAIN once the NAPI quota (*work_done >= work_to_do) is hit. */
static int e100_rx_indicate(struct nic *nic, struct rx *rx,
	unsigned int *work_done, unsigned int work_to_do)
{
	struct net_device *dev = nic->netdev;
	struct sk_buff *skb = rx->skb;
	struct rfd *rfd = (struct rfd *)skb->data;
	u16 rfd_status, actual_size;

	if (unlikely(work_done && *work_done >= work_to_do))
		return -EAGAIN;

	/* Need to sync before taking a peek at cb_complete bit */
	pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	rfd_status = le16_to_cpu(rfd->status);

	DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);

	/* If data isn't ready, nothing to indicate */
	if (unlikely(!(rfd_status & cb_complete))) {
		/* If the next buffer has the el bit, but we think the receiver
		 * is still running, check to see if it really stopped while
		 * we had interrupts off.
		 * This allows for a fast restart without re-enabling
		 * interrupts */
		if ((le16_to_cpu(rfd->command) & cb_el) &&
		    (RU_RUNNING == nic->ru_running))

			if (ioread8(&nic->csr->scb.status) & rus_no_res)
				nic->ru_running = RU_SUSPENDED;
		/* NOTE(review): this sync uses PCI_DMA_FROMDEVICE although
		 * the buffer was mapped PCI_DMA_BIDIRECTIONAL (see
		 * e100_rx_alloc_skb) -- confirm the direction mismatch is
		 * intentional; every other sync in this file matches the
		 * mapping direction. */
		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
					       sizeof(struct rfd),
					       PCI_DMA_FROMDEVICE);
		return -ENODATA;
	}

	/* Get actual data size */
	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
	if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
		actual_size = RFD_BUF_LEN - sizeof(struct rfd);

	/* Get data */
	pci_unmap_single(nic->pdev, rx->dma_addr,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	/* If this buffer has the el bit, but we think the receiver
	 * is still running, check to see if it really stopped while
	 * we had interrupts off.
	 * This allows for a fast restart without re-enabling interrupts.
	 * This can happen when the RU sees the size change but also sees
	 * the el bit set. */
	if ((le16_to_cpu(rfd->command) & cb_el) &&
	    (RU_RUNNING == nic->ru_running)) {

		if (ioread8(&nic->csr->scb.status) & rus_no_res)
			nic->ru_running = RU_SUSPENDED;
	}

	/* Pull off the RFD and put the actual data (minus eth hdr) */
	skb_reserve(skb, sizeof(struct rfd));
	skb_put(skb, actual_size);
	skb->protocol = eth_type_trans(skb, nic->netdev);

	if (unlikely(!(rfd_status & cb_ok))) {
		/* Don't indicate if hardware indicates errors */
		dev_kfree_skb_any(skb);
	} else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
		/* Don't indicate oversized frames */
		nic->rx_over_length_errors++;
		dev_kfree_skb_any(skb);
	} else {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += actual_size;
		netif_receive_skb(skb);
		if (work_done)
			(*work_done)++;
	}

	/* slot is now empty; e100_rx_clean() will refill it */
	rx->skb = NULL;

	return 0;
}
1982
/* NAPI Rx work: indicate completed RFDs to the stack (bounded by
 * work_to_do), refill empty slots, move the el-bit "stop marker" to
 * the new before-last RFD, and restart the receive unit if it hit
 * No Resources while we were cleaning. */
static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
	unsigned int work_to_do)
{
	struct rx *rx;
	int restart_required = 0, err = 0;
	struct rx *old_before_last_rx, *new_before_last_rx;
	struct rfd *old_before_last_rfd, *new_before_last_rfd;

	/* Indicate newly arrived packets */
	for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
		err = e100_rx_indicate(nic, rx, work_done, work_to_do);
		/* Hit quota or no more to clean */
		if (-EAGAIN == err || -ENODATA == err)
			break;
	}

	/* On EAGAIN, hit quota so have more work to do, restart once
	 * cleanup is complete.
	 * Else, are we already rnr? then pay attention!!! this ensures that
	 * the state machine progression never allows a start with a
	 * partially cleaned list, avoiding a race between hardware
	 * and rx_to_clean when in NAPI mode */
	if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
		restart_required = 1;

	old_before_last_rx = nic->rx_to_use->prev->prev;
	old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;

	/* Alloc new skbs to refill list */
	for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
		if (unlikely(e100_rx_alloc_skb(nic, rx)))
			break; /* Better luck next time (see watchdog) */
	}

	new_before_last_rx = nic->rx_to_use->prev->prev;
	if (new_before_last_rx != old_before_last_rx) {
		/* Set the el-bit on the buffer that is before the last buffer.
		 * This lets us update the next pointer on the last buffer
		 * without worrying about hardware touching it.
		 * We set the size to 0 to prevent hardware from touching this
		 * buffer.
		 * When the hardware hits the before last buffer with el-bit
		 * and size of 0, it will RNR interrupt, the RUS will go into
		 * the No Resources state.  It will not complete nor write to
		 * this buffer. */
		new_before_last_rfd =
			(struct rfd *)new_before_last_rx->skb->data;
		new_before_last_rfd->size = 0;
		new_before_last_rfd->command |= cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			new_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);

		/* Now that we have a new stopping point, we can clear the old
		 * stopping point.  We must sync twice to get the proper
		 * ordering on the hardware side of things. */
		old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
		old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
	}

	if (restart_required) {
		// ack the rnr?
		iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
		e100_start_receiver(nic, nic->rx_to_clean);
		if (work_done)
			(*work_done)++;
	}
}
2058
/* Free every RFD skb (unmapping its DMA first), release the rx ring
 * bookkeeping array, and mark the receive unit uninitialized. */
static void e100_rx_clean_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;

	nic->ru_running = RU_UNINITIALIZED;

	if (nic->rxs) {
		for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
			if (rx->skb) {
				pci_unmap_single(nic->pdev, rx->dma_addr,
					RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
				dev_kfree_skb(rx->skb);
			}
		}
		kfree(nic->rxs);
		nic->rxs = NULL;
	}

	nic->rx_to_use = nic->rx_to_clean = NULL;
}
2080
/* Allocate the circular rx bookkeeping array and an skb for every
 * slot, then mark the before-last RFD with the el bit and zero size
 * so the hardware stops there (see e100_rx_clean()).  Leaves the RU
 * in the SUSPENDED state, ready for e100_start_receiver().
 * Returns 0 or -ENOMEM (everything is unwound on failure). */
static int e100_rx_alloc_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;
	struct rfd *before_last;

	nic->rx_to_use = nic->rx_to_clean = NULL;
	nic->ru_running = RU_UNINITIALIZED;

	if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
		return -ENOMEM;

	for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
		rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
		rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
		if (e100_rx_alloc_skb(nic, rx)) {
			e100_rx_clean_list(nic);
			return -ENOMEM;
		}
	}
	/* Set the el-bit on the buffer that is before the last buffer.
	 * This lets us update the next pointer on the last buffer without
	 * worrying about hardware touching it.
	 * We set the size to 0 to prevent hardware from touching this buffer.
	 * When the hardware hits the before last buffer with el-bit and size
	 * of 0, it will RNR interrupt, the RU will go into the No Resources
	 * state.  It will not complete nor write to this buffer. */
	rx = nic->rxs->prev->prev;
	before_last = (struct rfd *)rx->skb->data;
	before_last->command |= cpu_to_le16(cb_el);
	before_last->size = 0;
	pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);

	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
	nic->ru_running = RU_SUSPENDED;

	return 0;
}
2120
/* Shared interrupt handler: ack all asserted causes, note a Receive
 * No Resource condition so the Rx path knows to restart the receive
 * unit, then defer the real work to NAPI with the device IRQ masked. */
static irqreturn_t e100_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct nic *nic = netdev_priv(netdev);
	u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);

	DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack);

	if (stat_ack == stat_ack_not_ours ||	/* Not our interrupt */
	    stat_ack == stat_ack_not_present)	/* Hardware is ejected */
		return IRQ_NONE;

	/* Ack interrupt(s) */
	iowrite8(stat_ack, &nic->csr->scb.stat_ack);

	/* We hit Receive No Resource (RNR); restart RU after cleaning */
	if (stat_ack & stat_ack_rnr)
		nic->ru_running = RU_SUSPENDED;

	/* mask the IRQ before scheduling NAPI to avoid re-entry */
	if (likely(napi_schedule_prep(&nic->napi))) {
		e100_disable_irq(nic);
		__napi_schedule(&nic->napi);
	}

	return IRQ_HANDLED;
}
2147
/* NAPI poll: clean received frames (bounded by budget) and completed
 * Tx CBs; when the budget is not exhausted, complete NAPI and
 * re-enable the device interrupt.  Returns the Rx work done. */
static int e100_poll(struct napi_struct *napi, int budget)
{
	struct nic *nic = container_of(napi, struct nic, napi);
	unsigned int work_done = 0;

	e100_rx_clean(nic, &work_done, budget);
	e100_tx_clean(nic);

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		e100_enable_irq(nic);
	}

	return work_done;
}
2164
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polled "interrupt" for netconsole/netpoll: run the ISR and Tx
 * cleanup by hand with the device IRQ masked. */
static void e100_netpoll(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	e100_disable_irq(nic);
	e100_intr(nic->pdev->irq, netdev);
	e100_tx_clean(nic);
	e100_enable_irq(nic);
}
#endif
2176
1da177e4
LT
2177static int e100_set_mac_address(struct net_device *netdev, void *p)
2178{
2179 struct nic *nic = netdev_priv(netdev);
2180 struct sockaddr *addr = p;
2181
2182 if (!is_valid_ether_addr(addr->sa_data))
2183 return -EADDRNOTAVAIL;
2184
2185 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2186 e100_exec_cb(nic, NULL, e100_setup_iaaddr);
2187
2188 return 0;
2189}
2190
2191static int e100_change_mtu(struct net_device *netdev, int new_mtu)
2192{
f26251eb 2193 if (new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
1da177e4
LT
2194 return -EINVAL;
2195 netdev->mtu = new_mtu;
2196 return 0;
2197}
2198
2199static int e100_asf(struct nic *nic)
2200{
2201 /* ASF can be enabled from eeprom */
2202 return((nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
2203 (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
2204 !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
2205 ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
2206}
2207
/* Bring the interface fully up: allocate Rx/Tx rings, initialize the
 * hardware, start the receiver, watchdog and IRQ, then enable NAPI
 * and interrupts.  On failure, unwinds in reverse order via the goto
 * ladder and returns the error. */
static int e100_up(struct nic *nic)
{
	int err;

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_rx_clean_list;
	if ((err = e100_hw_init(nic)))
		goto err_clean_cbs;
	e100_set_multicast_list(nic->netdev);
	e100_start_receiver(nic, NULL);
	mod_timer(&nic->watchdog, jiffies);
	if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
		nic->netdev->name, nic->netdev)))
		goto err_no_irq;
	netif_wake_queue(nic->netdev);
	napi_enable(&nic->napi);
	/* enable ints _after_ enabling poll, preventing a race between
	 * disable ints+schedule */
	e100_enable_irq(nic);
	return 0;

err_no_irq:
	del_timer_sync(&nic->watchdog);
err_clean_cbs:
	e100_clean_cbs(nic);
err_rx_clean_list:
	e100_rx_clean_list(nic);
	return err;
}
2239
/* Tear down a running interface: quiesce NAPI first so no poll is in
 * flight, then stop the queue, reset the hardware, and release the
 * IRQ, watchdog, and Rx/Tx rings. */
static void e100_down(struct nic *nic)
{
	/* wait here for poll to complete */
	napi_disable(&nic->napi);
	netif_stop_queue(nic->netdev);
	e100_hw_reset(nic);
	free_irq(nic->pdev->irq, nic->netdev);
	del_timer_sync(&nic->watchdog);
	netif_carrier_off(nic->netdev);
	e100_clean_cbs(nic);
	e100_rx_clean_list(nic);
}
2252
/* ndo_tx_timeout hook.  The real recovery (down/up) must run in
 * process context, so just schedule the worker. */
static void e100_tx_timeout(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	/* Reset outside of interrupt context, to avoid request_irq
	 * in interrupt context */
	schedule_work(&nic->tx_timeout_task);
}
2261
/* Process-context worker for Tx timeouts: log the SCB status and
 * bounce the interface (full down/up) under the RTNL lock. */
static void e100_tx_timeout_task(struct work_struct *work)
{
	struct nic *nic = container_of(work, struct nic, tx_timeout_task);
	struct net_device *netdev = nic->netdev;

	DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
		ioread8(&nic->csr->scb.status));

	rtnl_lock();
	if (netif_running(netdev)) {
		e100_down(netdev_priv(netdev));
		e100_up(netdev_priv(netdev));
	}
	rtnl_unlock();
}
2277
/* Internal MAC or PHY loopback self-test: transmit one 0xFF-filled
 * frame in loopback mode and compare it byte-for-byte with what the
 * receiver captured.  Returns 0 on pass, -EAGAIN on data mismatch,
 * or a setup error code. */
static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
{
	int err;
	struct sk_buff *skb;

	/* Use driver resources to perform internal MAC or PHY
	 * loopback test.  A single packet is prepared and transmitted
	 * in loopback mode, and the test passes if the received
	 * packet compares byte-for-byte to the transmitted packet. */

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_clean_rx;

	/* ICH PHY loopback is broken so do MAC loopback instead */
	if (nic->flags & ich && loopback_mode == lb_phy)
		loopback_mode = lb_mac;

	nic->loopback = loopback_mode;
	if ((err = e100_hw_init(nic)))
		goto err_loopback_none;

	if (loopback_mode == lb_phy)
		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
			BMCR_LOOPBACK);

	e100_start_receiver(nic, NULL);

	if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
		err = -ENOMEM;
		goto err_loopback_none;
	}
	skb_put(skb, ETH_DATA_LEN);
	memset(skb->data, 0xFF, ETH_DATA_LEN);
	e100_xmit_frame(skb, nic->netdev);

	/* give the frame time to loop back into the first RFD */
	msleep(10);

	pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
			RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
	   skb->data, ETH_DATA_LEN))
		err = -EAGAIN;

err_loopback_none:
	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
	nic->loopback = lb_none;
	e100_clean_cbs(nic);
	e100_hw_reset(nic);
err_clean_rx:
	e100_rx_clean_list(nic);
	return err;
}
2333
2334#define MII_LED_CONTROL 0x1B
b55de80e
BA
2335#define E100_82552_LED_OVERRIDE 0x19
2336#define E100_82552_LED_ON 0x000F /* LEDTX and LED_RX both on */
2337#define E100_82552_LED_OFF 0x000A /* LEDTX and LED_RX both off */
1da177e4
LT
/* Timer callback for "ethtool -p": toggle the LED each tick.  The
 * 82552 PHY uses a different override register and on/off values
 * than the MII_LED_CONTROL register of the older parts. */
static void e100_blink_led(unsigned long data)
{
	struct nic *nic = (struct nic *)data;
	enum led_state {
		led_on = 0x01,
		led_off = 0x04,
		led_on_559 = 0x05,
		led_on_557 = 0x07,
	};
	u16 led_reg = MII_LED_CONTROL;

	if (nic->phy == phy_82552_v) {
		led_reg = E100_82552_LED_OVERRIDE;

		nic->leds = (nic->leds == E100_82552_LED_ON) ?
			E100_82552_LED_OFF : E100_82552_LED_ON;
	} else {
		/* 557 and 559 class MACs use different "on" values */
		nic->leds = (nic->leds & led_on) ? led_off :
			(nic->mac < mac_82559_D101M) ? led_on_557 :
			led_on_559;
	}
	mdio_write(nic->netdev, nic->mii.phy_id, led_reg, nic->leds);
	mod_timer(&nic->blink_timer, jiffies + HZ / 4);
}
2362
2363static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
2364{
2365 struct nic *nic = netdev_priv(netdev);
2366 return mii_ethtool_gset(&nic->mii, cmd);
2367}
2368
/* ethtool set_settings: reset the PHY, apply the requested
 * speed/duplex/autoneg via the generic MII helper, then reload the
 * device configure block so the MAC matches the PHY. */
static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
	err = mii_ethtool_sset(&nic->mii, cmd);
	e100_exec_cb(nic, NULL, e100_configure);

	return err;
}
2380
2381static void e100_get_drvinfo(struct net_device *netdev,
2382 struct ethtool_drvinfo *info)
2383{
2384 struct nic *nic = netdev_priv(netdev);
2385 strcpy(info->driver, DRV_NAME);
2386 strcpy(info->version, DRV_VERSION);
2387 strcpy(info->fw_version, "N/A");
2388 strcpy(info->bus_info, pci_name(nic->pdev));
2389}
2390
abf9b902 2391#define E100_PHY_REGS 0x1C
1da177e4
LT
2392static int e100_get_regs_len(struct net_device *netdev)
2393{
2394 struct nic *nic = netdev_priv(netdev);
abf9b902 2395 return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
1da177e4
LT
2396}
2397
/* ethtool get_regs: dump one u32 of SCB command/status, then PHY
 * MDIO registers 0x1C down to 0x00 (one u32 each, highest register
 * first), then the dump buffer the hardware fills in response to the
 * dump command (msleep gives it time to complete).
 * NOTE(review): the last MDIO word lands at buff[1 + E100_PHY_REGS]
 * and the dump at buff[2 + E100_PHY_REGS]; verify that
 * e100_get_regs_len() reports at least that many bytes. */
static void e100_get_regs(struct net_device *netdev,
	struct ethtool_regs *regs, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	u32 *buff = p;
	int i;

	regs->version = (1 << 24) | nic->pdev->revision;
	buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
		ioread8(&nic->csr->scb.cmd_lo) << 16 |
		ioread16(&nic->csr->scb.status);
	for (i = E100_PHY_REGS; i >= 0; i--)
		buff[1 + E100_PHY_REGS - i] =
			mdio_read(netdev, nic->mii.phy_id, i);
	memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
	e100_exec_cb(nic, NULL, e100_dump);
	msleep(10);
	memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
		sizeof(nic->mem->dump_buf));
}
2418
2419static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2420{
2421 struct nic *nic = netdev_priv(netdev);
2422 wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
2423 wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
2424}
2425
/* ethtool set_wol: only magic-packet wake (or none) is accepted, and
 * only if the PCI device can actually wake the system.  Updates the
 * driver flag and PM wakeup state, then reloads the configure block
 * so the hardware picks up the change. */
static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct nic *nic = netdev_priv(netdev);

	if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
	    !device_can_wakeup(&nic->pdev->dev))
		return -EOPNOTSUPP;

	if (wol->wolopts)
		nic->flags |= wol_magic;
	else
		nic->flags &= ~wol_magic;

	device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);

	e100_exec_cb(nic, NULL, e100_configure);

	return 0;
}
2445
2446static u32 e100_get_msglevel(struct net_device *netdev)
2447{
2448 struct nic *nic = netdev_priv(netdev);
2449 return nic->msg_enable;
2450}
2451
2452static void e100_set_msglevel(struct net_device *netdev, u32 value)
2453{
2454 struct nic *nic = netdev_priv(netdev);
2455 nic->msg_enable = value;
2456}
2457
2458static int e100_nway_reset(struct net_device *netdev)
2459{
2460 struct nic *nic = netdev_priv(netdev);
2461 return mii_nway_restart(&nic->mii);
2462}
2463
2464static u32 e100_get_link(struct net_device *netdev)
2465{
2466 struct nic *nic = netdev_priv(netdev);
2467 return mii_link_ok(&nic->mii);
2468}
2469
2470static int e100_get_eeprom_len(struct net_device *netdev)
2471{
2472 struct nic *nic = netdev_priv(netdev);
2473 return nic->eeprom_wc << 1;
2474}
2475
2476#define E100_EEPROM_MAGIC 0x1234
2477static int e100_get_eeprom(struct net_device *netdev,
2478 struct ethtool_eeprom *eeprom, u8 *bytes)
2479{
2480 struct nic *nic = netdev_priv(netdev);
2481
2482 eeprom->magic = E100_EEPROM_MAGIC;
2483 memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);
2484
2485 return 0;
2486}
2487
/* ethtool set_eeprom: patch the shadow EEPROM image and write the
 * affected words (plus checksum) back to the physical EEPROM.
 * Rejects requests that do not carry the driver's magic cookie. */
static int e100_set_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	if (eeprom->magic != E100_EEPROM_MAGIC)
		return -EINVAL;

	memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);

	/* byte offset/len -> word addr/count; +1 word covers a span that
	 * straddles a word boundary */
	return e100_eeprom_save(nic, eeprom->offset >> 1,
		(eeprom->len >> 1) + 1);
}
2501
2502static void e100_get_ringparam(struct net_device *netdev,
2503 struct ethtool_ringparam *ring)
2504{
2505 struct nic *nic = netdev_priv(netdev);
2506 struct param_range *rfds = &nic->params.rfds;
2507 struct param_range *cbs = &nic->params.cbs;
2508
2509 ring->rx_max_pending = rfds->max;
2510 ring->tx_max_pending = cbs->max;
2511 ring->rx_mini_max_pending = 0;
2512 ring->rx_jumbo_max_pending = 0;
2513 ring->rx_pending = rfds->count;
2514 ring->tx_pending = cbs->count;
2515 ring->rx_mini_pending = 0;
2516 ring->rx_jumbo_pending = 0;
2517}
2518
/* ethtool set_ringparam: clamp the requested Rx/Tx ring sizes to the
 * supported min/max and re-create the rings, bouncing the interface
 * if it is running.  Mini/jumbo rings are rejected. */
static int e100_set_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct nic *nic = netdev_priv(netdev);
	struct param_range *rfds = &nic->params.rfds;
	struct param_range *cbs = &nic->params.cbs;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	if (netif_running(netdev))
		e100_down(nic);
	/* clamp to [min, max] */
	rfds->count = max(ring->rx_pending, rfds->min);
	rfds->count = min(rfds->count, rfds->max);
	cbs->count = max(ring->tx_pending, cbs->min);
	cbs->count = min(cbs->count, cbs->max);
	DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n",
		rfds->count, cbs->count);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
2542
/* Names reported for "ethtool -t"; order must match the data[] slots
 * filled in by e100_diag_test(). */
static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
	"Link test (on/offline)",
	"Eeprom test (on/offline)",
	"Self test (offline)",
	"Mac loopback (offline)",
	"Phy loopback (offline)",
};
#define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test)
1da177e4 2551
1da177e4
LT
/* ethtool self-test: always runs the link and EEPROM checksum
 * checks; an offline request adds the hardware self-test plus MAC
 * and PHY loopback, saving and restoring the link settings around
 * the disruptive tests.  A non-zero data[] entry marks the
 * corresponding test (see e100_gstrings_test) as failed. */
static void e100_diag_test(struct net_device *netdev,
	struct ethtool_test *test, u64 *data)
{
	struct ethtool_cmd cmd;
	struct nic *nic = netdev_priv(netdev);
	int i, err;

	memset(data, 0, E100_TEST_LEN * sizeof(u64));
	data[0] = !mii_link_ok(&nic->mii);
	data[1] = e100_eeprom_load(nic);
	if (test->flags & ETH_TEST_FL_OFFLINE) {

		/* save speed, duplex & autoneg settings */
		err = mii_ethtool_gset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_down(nic);
		data[2] = e100_self_test(nic);
		data[3] = e100_loopback_test(nic, lb_mac);
		data[4] = e100_loopback_test(nic, lb_phy);

		/* restore speed, duplex & autoneg settings */
		err = mii_ethtool_sset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_up(nic);
	}
	for (i = 0; i < E100_TEST_LEN; i++)
		test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;

	/* give the link time to come back up after the tests */
	msleep_interruptible(4 * 1000);
}
2584
/* ethtool phys_id ("ethtool -p"): blink the LED for "data" seconds
 * (bounded by the maximum schedule timeout) using the blink timer,
 * then return the LED to hardware control. */
static int e100_phys_id(struct net_device *netdev, u32 data)
{
	struct nic *nic = netdev_priv(netdev);
	/* the 82552 PHY drives its LEDs from a different register */
	u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
		MII_LED_CONTROL;

	if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
		data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
	mod_timer(&nic->blink_timer, jiffies);
	msleep_interruptible(data * 1000);
	del_timer_sync(&nic->blink_timer);
	mdio_write(netdev, nic->mii.phy_id, led_reg, 0);

	return 0;
}
2600
/* Statistic names for "ethtool -S": the first E100_NET_STATS_LEN
 * entries mirror struct net_device_stats in declaration order; the
 * remainder are e100-specific counters (see e100_get_ethtool_stats). */
static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* device-specific stats */
	"tx_deferred", "tx_single_collisions", "tx_multi_collisions",
	"tx_flow_control_pause", "rx_flow_control_pause",
	"rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
};
#define E100_NET_STATS_LEN	21
#define E100_STATS_LEN	ARRAY_SIZE(e100_gstrings_stats)
1da177e4 2615
b9f2c044 2616static int e100_get_sset_count(struct net_device *netdev, int sset)
1da177e4 2617{
b9f2c044
JG
2618 switch (sset) {
2619 case ETH_SS_TEST:
2620 return E100_TEST_LEN;
2621 case ETH_SS_STATS:
2622 return E100_STATS_LEN;
2623 default:
2624 return -EOPNOTSUPP;
2625 }
1da177e4
LT
2626}
2627
/* ethtool get_ethtool_stats: the first E100_NET_STATS_LEN slots copy
 * struct net_device_stats field-by-field (same order as
 * e100_gstrings_stats); the rest are driver-private counters. */
static void e100_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct nic *nic = netdev_priv(netdev);
	int i;

	for (i = 0; i < E100_NET_STATS_LEN; i++)
		data[i] = ((unsigned long *)&netdev->stats)[i];

	data[i++] = nic->tx_deferred;
	data[i++] = nic->tx_single_collisions;
	data[i++] = nic->tx_multiple_collisions;
	data[i++] = nic->tx_fc_pause;
	data[i++] = nic->rx_fc_pause;
	data[i++] = nic->rx_fc_unsupported;
	data[i++] = nic->tx_tco_frames;
	data[i++] = nic->rx_tco_frames;
}
2646
2647static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2648{
f26251eb 2649 switch (stringset) {
1da177e4
LT
2650 case ETH_SS_TEST:
2651 memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
2652 break;
2653 case ETH_SS_STATS:
2654 memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
2655 break;
2656 }
2657}
2658
/* ethtool entry points; installed via SET_ETHTOOL_OPS() at probe. */
static const struct ethtool_ops e100_ethtool_ops = {
	.get_settings		= e100_get_settings,
	.set_settings		= e100_set_settings,
	.get_drvinfo		= e100_get_drvinfo,
	.get_regs_len		= e100_get_regs_len,
	.get_regs		= e100_get_regs,
	.get_wol		= e100_get_wol,
	.set_wol		= e100_set_wol,
	.get_msglevel		= e100_get_msglevel,
	.set_msglevel		= e100_set_msglevel,
	.nway_reset		= e100_nway_reset,
	.get_link		= e100_get_link,
	.get_eeprom_len		= e100_get_eeprom_len,
	.get_eeprom		= e100_get_eeprom,
	.set_eeprom		= e100_set_eeprom,
	.get_ringparam		= e100_get_ringparam,
	.set_ringparam		= e100_set_ringparam,
	.self_test		= e100_diag_test,
	.get_strings		= e100_get_strings,
	.phys_id		= e100_phys_id,
	.get_ethtool_stats	= e100_get_ethtool_stats,
	.get_sset_count		= e100_get_sset_count,
};
2682
2683static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2684{
2685 struct nic *nic = netdev_priv(netdev);
2686
2687 return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
2688}
2689
/* Allocate the coherent DMA scratch block shared with the hardware
 * (self-test results, stats, dump buffer).  Returns 0 or -ENOMEM. */
static int e100_alloc(struct nic *nic)
{
	nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
		&nic->dma_addr);
	return nic->mem ? 0 : -ENOMEM;
}
2696
2697static void e100_free(struct nic *nic)
2698{
f26251eb 2699 if (nic->mem) {
1da177e4
LT
2700 pci_free_consistent(nic->pdev, sizeof(struct mem),
2701 nic->mem, nic->dma_addr);
2702 nic->mem = NULL;
2703 }
2704}
2705
2706static int e100_open(struct net_device *netdev)
2707{
2708 struct nic *nic = netdev_priv(netdev);
2709 int err = 0;
2710
2711 netif_carrier_off(netdev);
f26251eb 2712 if ((err = e100_up(nic)))
1da177e4
LT
2713 DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n");
2714 return err;
2715}
2716
/* net_device stop hook: take the adapter down; never fails. */
static int e100_close(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	e100_down(nic);
	return 0;
}
2722
acc78426
SH
/* net_device callbacks; wired to the netdev in e100_probe(). */
static const struct net_device_ops e100_netdev_ops = {
	.ndo_open = e100_open,
	.ndo_stop = e100_close,
	.ndo_start_xmit = e100_xmit_frame,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_multicast_list = e100_set_multicast_list,
	.ndo_set_mac_address = e100_set_mac_address,
	.ndo_change_mtu = e100_change_mtu,
	.ndo_do_ioctl = e100_do_ioctl,
	.ndo_tx_timeout = e100_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = e100_netpoll,
#endif
};
2737
1da177e4
LT
2738static int __devinit e100_probe(struct pci_dev *pdev,
2739 const struct pci_device_id *ent)
2740{
2741 struct net_device *netdev;
2742 struct nic *nic;
2743 int err;
2744
f26251eb
BA
2745 if (!(netdev = alloc_etherdev(sizeof(struct nic)))) {
2746 if (((1 << debug) - 1) & NETIF_MSG_PROBE)
1da177e4
LT
2747 printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n");
2748 return -ENOMEM;
2749 }
2750
acc78426 2751 netdev->netdev_ops = &e100_netdev_ops;
1da177e4 2752 SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
1da177e4 2753 netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
0eb5a34c 2754 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1da177e4
LT
2755
2756 nic = netdev_priv(netdev);
bea3348e 2757 netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
1da177e4
LT
2758 nic->netdev = netdev;
2759 nic->pdev = pdev;
2760 nic->msg_enable = (1 << debug) - 1;
72001762 2761 nic->mdio_ctrl = mdio_ctrl_hw;
1da177e4
LT
2762 pci_set_drvdata(pdev, netdev);
2763
f26251eb 2764 if ((err = pci_enable_device(pdev))) {
1da177e4
LT
2765 DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n");
2766 goto err_out_free_dev;
2767 }
2768
f26251eb 2769 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1da177e4
LT
2770 DPRINTK(PROBE, ERR, "Cannot find proper PCI device "
2771 "base address, aborting.\n");
2772 err = -ENODEV;
2773 goto err_out_disable_pdev;
2774 }
2775
f26251eb 2776 if ((err = pci_request_regions(pdev, DRV_NAME))) {
1da177e4
LT
2777 DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n");
2778 goto err_out_disable_pdev;
2779 }
2780
284901a9 2781 if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
1da177e4
LT
2782 DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n");
2783 goto err_out_free_res;
2784 }
2785
1da177e4
LT
2786 SET_NETDEV_DEV(netdev, &pdev->dev);
2787
27345bb6
JB
2788 if (use_io)
2789 DPRINTK(PROBE, INFO, "using i/o access mode\n");
2790
2791 nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
f26251eb 2792 if (!nic->csr) {
1da177e4
LT
2793 DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n");
2794 err = -ENOMEM;
2795 goto err_out_free_res;
2796 }
2797
f26251eb 2798 if (ent->driver_data)
1da177e4
LT
2799 nic->flags |= ich;
2800 else
2801 nic->flags &= ~ich;
2802
2803 e100_get_defaults(nic);
2804
1f53367d 2805 /* locks must be initialized before calling hw_reset */
1da177e4
LT
2806 spin_lock_init(&nic->cb_lock);
2807 spin_lock_init(&nic->cmd_lock);
ac7c6669 2808 spin_lock_init(&nic->mdio_lock);
1da177e4
LT
2809
2810 /* Reset the device before pci_set_master() in case device is in some
2811 * funky state and has an interrupt pending - hint: we don't have the
2812 * interrupt handler registered yet. */
2813 e100_hw_reset(nic);
2814
2815 pci_set_master(pdev);
2816
2817 init_timer(&nic->watchdog);
2818 nic->watchdog.function = e100_watchdog;
2819 nic->watchdog.data = (unsigned long)nic;
2820 init_timer(&nic->blink_timer);
2821 nic->blink_timer.function = e100_blink_led;
2822 nic->blink_timer.data = (unsigned long)nic;
2823
c4028958 2824 INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
2acdb1e0 2825
f26251eb 2826 if ((err = e100_alloc(nic))) {
1da177e4
LT
2827 DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
2828 goto err_out_iounmap;
2829 }
2830
f26251eb 2831 if ((err = e100_eeprom_load(nic)))
1da177e4
LT
2832 goto err_out_free;
2833
f92d8728
MC
2834 e100_phy_init(nic);
2835
1da177e4 2836 memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
a92dd923 2837 memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
948cd43f
JB
2838 if (!is_valid_ether_addr(netdev->perm_addr)) {
2839 if (!eeprom_bad_csum_allow) {
2840 DPRINTK(PROBE, ERR, "Invalid MAC address from "
2841 "EEPROM, aborting.\n");
2842 err = -EAGAIN;
2843 goto err_out_free;
2844 } else {
2845 DPRINTK(PROBE, ERR, "Invalid MAC address from EEPROM, "
2846 "you MUST configure one.\n");
2847 }
1da177e4
LT
2848 }
2849
2850 /* Wol magic packet can be enabled from eeprom */
f26251eb 2851 if ((nic->mac >= mac_82558_D101_A4) &&
bc79fc84 2852 (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
1da177e4 2853 nic->flags |= wol_magic;
bc79fc84
RW
2854 device_set_wakeup_enable(&pdev->dev, true);
2855 }
1da177e4 2856
6bdacb1a 2857 /* ack any pending wake events, disable PME */
e7272403 2858 pci_pme_active(pdev, false);
1da177e4
LT
2859
2860 strcpy(netdev->name, "eth%d");
f26251eb 2861 if ((err = register_netdev(netdev))) {
1da177e4
LT
2862 DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
2863 goto err_out_free;
2864 }
98468efd
RO
2865 nic->cbs_pool = pci_pool_create(netdev->name,
2866 nic->pdev,
211a0d94 2867 nic->params.cbs.max * sizeof(struct cb),
98468efd
RO
2868 sizeof(u32),
2869 0);
e174961c 2870 DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %pM\n",
0795af57 2871 (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
e174961c 2872 pdev->irq, netdev->dev_addr);
1da177e4
LT
2873
2874 return 0;
2875
2876err_out_free:
2877 e100_free(nic);
2878err_out_iounmap:
27345bb6 2879 pci_iounmap(pdev, nic->csr);
1da177e4
LT
2880err_out_free_res:
2881 pci_release_regions(pdev);
2882err_out_disable_pdev:
2883 pci_disable_device(pdev);
2884err_out_free_dev:
2885 pci_set_drvdata(pdev, NULL);
2886 free_netdev(netdev);
2887 return err;
2888}
2889
/* PCI remove: undo e100_probe() in reverse order. Tolerates a NULL
 * drvdata (probe failed before pci_set_drvdata, or already removed). */
static void __devexit e100_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct nic *nic = netdev_priv(netdev);
		/* Stop the stack from using the device before tearing down */
		unregister_netdev(netdev);
		e100_free(nic);
		pci_iounmap(pdev, nic->csr);
		/* nic is embedded in netdev, so the pool must be destroyed
		 * before free_netdev() releases it */
		pci_pool_destroy(nic->cbs_pool);
		free_netdev(netdev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
2906
b55de80e
BA
2907#define E100_82552_SMARTSPEED 0x14 /* SmartSpeed Ctrl register */
2908#define E100_82552_REV_ANEG 0x0200 /* Reverse auto-negotiation */
2909#define E100_82552_ANEG_NOW 0x0400 /* Auto-negotiate now */
/* Common power-down path shared by suspend and shutdown: stop traffic,
 * optionally arm wake-on-LAN on the PHY, and report through
 * *enable_wake whether the caller should keep wake events enabled. */
static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (netif_running(netdev))
		e100_down(nic);
	netif_device_detach(netdev);

	pci_save_state(pdev);

	/* NOTE(review): bitwise '|' acts like '||' here since both sides
	 * are truth values, but it always evaluates e100_asf(nic) — keep
	 * as-is unless e100_asf is confirmed side-effect free. */
	if ((nic->flags & wol_magic) | e100_asf(nic)) {
		/* enable reverse auto-negotiation */
		if (nic->phy == phy_82552_v) {
			u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
			                           E100_82552_SMARTSPEED);

			mdio_write(netdev, nic->mii.phy_id,
			           E100_82552_SMARTSPEED, smartspeed |
			           E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
		}
		*enable_wake = true;
	} else {
		*enable_wake = false;
	}

	pci_disable_device(pdev);
}
1da177e4 2938
ac7c992c
TLSC
2939static int __e100_power_off(struct pci_dev *pdev, bool wake)
2940{
6905b1f1 2941 if (wake)
ac7c992c 2942 return pci_prepare_to_sleep(pdev);
6905b1f1
RW
2943
2944 pci_wake_from_d3(pdev, false);
2945 pci_set_power_state(pdev, PCI_D3hot);
2946
2947 return 0;
1da177e4
LT
2948}
2949
f902283b 2950#ifdef CONFIG_PM
ac7c992c
TLSC
2951static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
2952{
2953 bool wake;
2954 __e100_shutdown(pdev, &wake);
2955 return __e100_power_off(pdev, wake);
2956}
2957
1da177e4
LT
/* PM resume hook: restore PCI state, undo the 82552 reverse
 * auto-negotiation armed at suspend time, and bring the interface
 * back up if it was running. */
static int e100_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	/* disable reverse auto-negotiation */
	if (nic->phy == phy_82552_v) {
		u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
		                           E100_82552_SMARTSPEED);

		mdio_write(netdev, nic->mii.phy_id,
		           E100_82552_SMARTSPEED,
		           smartspeed & ~(E100_82552_REV_ANEG));
	}

	netif_device_attach(netdev);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
975b366a 2984#endif /* CONFIG_PM */
1da177e4 2985
d18c3db5 2986static void e100_shutdown(struct pci_dev *pdev)
6bdacb1a 2987{
ac7c992c
TLSC
2988 bool wake;
2989 __e100_shutdown(pdev, &wake);
2990 if (system_state == SYSTEM_POWER_OFF)
2991 __e100_power_off(pdev, wake);
6bdacb1a
MC
2992}
2993
2cc30492
AK
/* ------------------ PCI Error Recovery infrastructure -------------- */
/**
 * e100_io_error_detected - called when PCI error is detected.
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * Returns DISCONNECT on permanent failure, otherwise NEED_RESET to
 * request a slot reset from the PCI error-recovery core.
 */
static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* Stop the stack from touching a device that may be unreachable */
	netif_device_detach(netdev);

	/* Link is gone for good - nothing to recover */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e100_down(nic);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
3017
/**
 * e100_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch.
 */
static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "e100: Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	/* Only one device per card can do a reset */
	if (0 != PCI_FUNC(pdev->devfn))
		return PCI_ERS_RESULT_RECOVERED;
	e100_hw_reset(nic);
	e100_phy_init(nic);

	return PCI_ERS_RESULT_RECOVERED;
}
3043
/**
 * e100_io_resume - resume normal operations
 * @pdev: Pointer to PCI device
 *
 * Resume normal operations after an error recovery
 * sequence has been completed.
 */
static void e100_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	/* Re-attach and restart the interface if it was up before the error */
	netif_device_attach(netdev);
	if (netif_running(netdev)) {
		e100_open(netdev);
		mod_timer(&nic->watchdog, jiffies);
	}
}
3065
/* PCI error-recovery callbacks registered through e100_driver. */
static struct pci_error_handlers e100_err_handler = {
	.error_detected = e100_io_error_detected,
	.slot_reset = e100_io_slot_reset,
	.resume = e100_io_resume,
};
6bdacb1a 3071
1da177e4
LT
/* Driver registration record handed to the PCI core. */
static struct pci_driver e100_driver = {
	.name = DRV_NAME,
	.id_table = e100_id_table,
	.probe = e100_probe,
	.remove = __devexit_p(e100_remove),
#ifdef CONFIG_PM
	/* Power Management hooks */
	.suspend = e100_suspend,
	.resume = e100_resume,
#endif
	.shutdown = e100_shutdown,
	.err_handler = &e100_err_handler,
};
3085
3086static int __init e100_init_module(void)
3087{
f26251eb 3088 if (((1 << debug) - 1) & NETIF_MSG_DRV) {
1da177e4
LT
3089 printk(KERN_INFO PFX "%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
3090 printk(KERN_INFO PFX "%s\n", DRV_COPYRIGHT);
3091 }
29917620 3092 return pci_register_driver(&e100_driver);
1da177e4
LT
3093}
3094
/* Module exit point: unhook the driver from the PCI core. */
static void __exit e100_cleanup_module(void)
{
	pci_unregister_driver(&e100_driver);
}

/* Wire the entry/exit points into the module loader. */
module_init(e100_init_module);
module_exit(e100_cleanup_module);