[PATCH] irq-flags: drivers/net: Use the new IRQF_ constants
drivers/net/starfire.c
/* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */
/*
        Written 1998-2000 by Donald Becker.

        Current maintainer is Ion Badulescu <ionut ta badula tod org>. Please
        send all bug reports to me, and not to Donald Becker, as this code
        has been heavily modified from Donald's original version.

        This software may be used and distributed according to the terms of
        the GNU General Public License (GPL), incorporated herein by reference.
        Drivers based on or derived from this code fall under the GPL and must
        retain the authorship, copyright and license notice. This file is not
        a complete program and may only be used when the entire operating
        system is licensed under the GPL.

        The information below comes from Donald Becker's original driver:

        The author may be reached as becker@scyld.com, or C/O
        Scyld Computing Corporation
        410 Severn Ave., Suite 210
        Annapolis MD 21403

        Support and updates available at
        http://www.scyld.com/network/starfire.html

        -----------------------------------------------------------

        Linux kernel-specific changes:

        LK1.1.1 (jgarzik):
        - Use PCI driver interface
        - Fix MOD_xxx races
        - softnet fixups

        LK1.1.2 (jgarzik):
        - Merge Becker version 0.15

        LK1.1.3 (Andrew Morton)
        - Timer cleanups

        LK1.1.4 (jgarzik):
        - Merge Becker version 1.03

        LK1.2.1 (Ion Badulescu <ionut@cs.columbia.edu>)
        - Support hardware Rx/Tx checksumming
        - Use the GFP firmware taken from Adaptec's Netware driver

        LK1.2.2 (Ion Badulescu)
        - Backported to 2.2.x

        LK1.2.3 (Ion Badulescu)
        - Fix the flaky mdio interface
        - More compat clean-ups

        LK1.2.4 (Ion Badulescu)
        - More 2.2.x initialization fixes

        LK1.2.5 (Ion Badulescu)
        - Several fixes from Manfred Spraul

        LK1.2.6 (Ion Badulescu)
        - Fixed ifup/ifdown/ifup problem in 2.4.x

        LK1.2.7 (Ion Badulescu)
        - Removed unused code
        - Made more functions static and __init

        LK1.2.8 (Ion Badulescu)
        - Quell bogus error messages, inform about the Tx threshold
        - Removed #ifdef CONFIG_PCI, this driver is PCI only

        LK1.2.9 (Ion Badulescu)
        - Merged Jeff Garzik's changes from 2.4.4-pre5
        - Added 2.2.x compatibility stuff required by the above changes

        LK1.2.9a (Ion Badulescu)
        - More updates from Jeff Garzik

        LK1.3.0 (Ion Badulescu)
        - Merged zerocopy support

        LK1.3.1 (Ion Badulescu)
        - Added ethtool support
        - Added GPIO (media change) interrupt support

        LK1.3.2 (Ion Badulescu)
        - Fixed 2.2.x compatibility issues introduced in 1.3.1
        - Fixed ethtool ioctl returning uninitialized memory

        LK1.3.3 (Ion Badulescu)
        - Initialize the TxMode register properly
        - Don't dereference dev->priv after freeing it

        LK1.3.4 (Ion Badulescu)
        - Fixed initialization timing problems
        - Fixed interrupt mask definitions

        LK1.3.5 (jgarzik)
        - ethtool NWAY_RST, GLINK, [GS]MSGLVL support

        LK1.3.6:
        - Sparc64 support and fixes (Ion Badulescu)
        - Better stats and error handling (Ion Badulescu)
        - Use new pci_set_mwi() PCI API function (jgarzik)

        LK1.3.7 (Ion Badulescu)
        - minimal implementation of tx_timeout()
        - correctly shutdown the Rx/Tx engines in netdev_close()
        - added calls to netif_carrier_on/off
          (patch from Stefan Rompf <srompf@isg.de>)
        - VLAN support

        LK1.3.8 (Ion Badulescu)
        - adjust DMA burst size on sparc64
        - 64-bit support
        - reworked zerocopy support for 64-bit buffers
        - working and usable interrupt mitigation/latency
        - reduced Tx interrupt frequency for lower interrupt overhead

        LK1.3.9 (Ion Badulescu)
        - bugfix for mcast filter
        - enable the right kind of Tx interrupts (TxDMADone, not TxDone)

        LK1.4.0 (Ion Badulescu)
        - NAPI support

        LK1.4.1 (Ion Badulescu)
        - flush PCI posting buffers after disabling Rx interrupts
        - put the chip to a D3 slumber on driver unload
        - added config option to enable/disable NAPI

        LK1.4.2 (Ion Badulescu)
        - finally added firmware (GPL'ed by Adaptec)
        - removed compatibility code for 2.2.x

        LK1.4.2.1 (Ion Badulescu)
        - fixed 32/64 bit issues on i386 + CONFIG_HIGHMEM
        - added 32-bit padding to outgoing skb's, removed previous workaround

TODO:   - fix forced speed/duplexing code (broken a long time ago, when
          somebody converted the driver to use the generic MII code)
        - fix VLAN support
*/

#define DRV_NAME        "starfire"
#define DRV_VERSION     "1.03+LK1.4.2.1"
#define DRV_RELDATE     "October 3, 2005"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <asm/processor.h>              /* Processor type for cache alignment. */
#include <asm/uaccess.h>
#include <asm/io.h>

#include "starfire_firmware.h"
/*
 * The current frame processor firmware fails to checksum a fragment
 * of length 1. If and when this is fixed, the #define below can be removed.
 */
#define HAS_BROKEN_FIRMWARE

/*
 * If using the broken firmware, data must be padded to the next 32-bit boundary.
 */
#ifdef HAS_BROKEN_FIRMWARE
#define PADDING_MASK 3
#endif
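
/*
 * Worked example (illustrative, not from the original source): with
 * PADDING_MASK == 3, a 61-byte frame is padded up to
 * (61 + 3) & ~3 == 64 bytes before it reaches the checksumming firmware;
 * see the skb_padto() call in start_tx().
 */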

/*
 * Define this if using the driver with the zero-copy patch
 */
#define ZEROCOPY

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define VLAN_SUPPORT
#endif

#ifndef CONFIG_ADAPTEC_STARFIRE_NAPI
#undef HAVE_NETDEV_POLL
#endif

/* The user-configurable values.
   These may be modified when a driver module is loaded.*/

/* Used for tuning interrupt latency vs. overhead. */
static int intr_latency;
static int small_frames;

static int debug = 1;                   /* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;
static int mtu;
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Starfire has a 512 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 512;
/* Whether to do TCP/UDP checksums in hardware */
static int enable_hw_cksum = 1;

#define PKT_BUF_SZ      1536            /* Size of each temporary Rx buffer.*/
/*
 * Set the copy breakpoint for the copy-only-tiny-frames scheme.
 * Setting to > 1518 effectively disables this feature.
 *
 * NOTE:
 * The ia64 doesn't allow for unaligned loads even of integers being
 * misaligned on a 2 byte boundary. Thus always force copying of
 * packets as the starfire doesn't allow for misaligned DMAs ;-(
 * 23/10/2000 - Jes
 *
 * The Alpha and the Sparc don't like unaligned loads, either. On Sparc64,
 * at least, having unaligned frames leads to a rather serious performance
 * penalty. -Ion
 */
#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
static int rx_copybreak = PKT_BUF_SZ;
#else
static int rx_copybreak /* = 0 */;
#endif
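
/*
 * For example (illustrative): rx_copybreak = 256 makes __netdev_rx() copy
 * any received frame shorter than 256 bytes into a freshly allocated skb,
 * leaving the original full-size ring buffer in place for reuse; larger
 * frames are passed up the stack in their original buffer.
 */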

/* PCI DMA burst size -- on sparc64 we want to force it to 64 bytes, on the others the default of 128 is fine. */
#ifdef __sparc__
#define DMA_BURST_SIZE 64
#else
#define DMA_BURST_SIZE 128
#endif
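
/*
 * The chip is programmed with the burst size in 32-byte units, so
 * DMA_BURST_SIZE / 32 yields 2 (64 bytes) on sparc and 4 (128 bytes)
 * elsewhere; see the RxDMACtrl and TxDescCtrl setup in netdev_open().
 */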

/* Used to pass the media type, etc.
   Both 'options[]' and 'full_duplex[]' exist for driver interoperability.
   The media type is usually passed in 'options[]'.
   These variables are deprecated, use ethtool instead. -Ion
*/
#define MAX_UNITS 8             /* More are supported, limit only on options */
static int options[MAX_UNITS] = {0, };
static int full_duplex[MAX_UNITS] = {0, };

/* Operational parameters that are set at compile time. */

/* The "native" ring sizes are either 256 or 2048.
   However in some modes a descriptor may be marked to wrap the ring earlier.
*/
#define RX_RING_SIZE    256
#define TX_RING_SIZE    32
/* The completion queues are fixed at 1024 entries i.e. 4K or 8KB. */
#define DONE_Q_SIZE     1024
/* All queues must be aligned on a 256-byte boundary */
#define QUEUE_ALIGN     256

#if RX_RING_SIZE > 256
#define RX_Q_ENTRIES Rx2048QEntries
#else
#define RX_Q_ENTRIES Rx256QEntries
#endif

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT      (2 * HZ)

/*
 * This SUCKS.
 * We need a much better method to determine if dma_addr_t is 64-bit.
 */
#if (defined(__i386__) && defined(CONFIG_HIGHMEM64G)) || defined(__x86_64__) || defined(__ia64__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR))
/* 64-bit dma_addr_t */
#define ADDR_64BITS     /* This chip uses 64 bit addresses. */
#define netdrv_addr_t u64
#define cpu_to_dma(x) cpu_to_le64(x)
#define dma_to_cpu(x) le64_to_cpu(x)
#define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
#define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
#define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
#define RX_DESC_ADDR_SIZE RxDescAddr64bit
#else  /* 32-bit dma_addr_t */
#define netdrv_addr_t u32
#define cpu_to_dma(x) cpu_to_le32(x)
#define dma_to_cpu(x) le32_to_cpu(x)
#define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
#define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
#define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
#define RX_DESC_ADDR_SIZE RxDescAddr32bit
#endif
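
/*
 * Note on the 64-bit case: netdev_open() writes the high 32 bits of a
 * queue address as (addr >> 16) >> 16 rather than addr >> 32. The double
 * shift avoids shifting by the full width of the type -- undefined
 * behavior in C when dma_addr_t is only 32 bits wide -- and safely
 * yields 0 there.
 */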

#define skb_first_frag_len(skb) skb_headlen(skb)
#define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
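
/*
 * For example (illustrative): an skb with two page fragments plus its
 * linear header has skb_shinfo(skb)->nr_frags == 2, so skb_num_frags()
 * returns 3 and start_tx() consumes three Tx descriptors for it.
 */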

#ifdef HAVE_NETDEV_POLL
#define init_poll(dev) \
do { \
        dev->poll = &netdev_poll; \
        dev->weight = max_interrupt_work; \
} while (0)
#define netdev_rx(dev, ioaddr) \
do { \
        u32 intr_enable; \
        if (netif_rx_schedule_prep(dev)) { \
                __netif_rx_schedule(dev); \
                intr_enable = readl(ioaddr + IntrEnable); \
                intr_enable &= ~(IntrRxDone | IntrRxEmpty); \
                writel(intr_enable, ioaddr + IntrEnable); \
                readl(ioaddr + IntrEnable); /* flush PCI posting buffers */ \
        } else { \
                /* Paranoia check */ \
                intr_enable = readl(ioaddr + IntrEnable); \
                if (intr_enable & (IntrRxDone | IntrRxEmpty)) { \
                        printk(KERN_INFO "%s: interrupt while in polling mode!\n", dev->name); \
                        intr_enable &= ~(IntrRxDone | IntrRxEmpty); \
                        writel(intr_enable, ioaddr + IntrEnable); \
                } \
        } \
} while (0)
#define netdev_receive_skb(skb) netif_receive_skb(skb)
#define vlan_netdev_receive_skb(skb, vlgrp, vlid) vlan_hwaccel_receive_skb(skb, vlgrp, vlid)
static int netdev_poll(struct net_device *dev, int *budget);
#else  /* not HAVE_NETDEV_POLL */
#define init_poll(dev)
#define netdev_receive_skb(skb) netif_rx(skb)
#define vlan_netdev_receive_skb(skb, vlgrp, vlid) vlan_hwaccel_rx(skb, vlgrp, vlid)
#define netdev_rx(dev, ioaddr) \
do { \
        int quota = np->dirty_rx + RX_RING_SIZE - np->cur_rx; \
        __netdev_rx(dev, &quota); \
} while (0)
#endif /* not HAVE_NETDEV_POLL */
/* end of compatibility code */


/* These identify the driver base version and may not be removed. */
static const char version[] __devinitdata =
KERN_INFO "starfire.c:v1.03 7/26/2000 Written by Donald Becker <becker@scyld.com>\n"
KERN_INFO " (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_param(max_interrupt_work, int, 0);
module_param(mtu, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(intr_latency, int, 0);
module_param(small_frames, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
module_param(enable_hw_cksum, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt");
MODULE_PARM_DESC(mtu, "MTU (all boards)");
MODULE_PARM_DESC(debug, "Debug level (0-6)");
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds");
MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)");
MODULE_PARM_DESC(options, "Deprecated: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "Deprecated: Forced full-duplex setting (0/1)");
MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)");

/*
                                Theory of Operation

I. Board Compatibility

This driver is for the Adaptec 6915 "Starfire" 64 bit PCI Ethernet adapter.

II. Board-specific settings

III. Driver operation

IIIa. Ring buffers

The Starfire hardware uses multiple fixed-size descriptor queues/rings. The
ring sizes are set fixed by the hardware, but may optionally be wrapped
earlier by the END bit in the descriptor.
This driver uses that hardware queue size for the Rx ring, where a large
number of entries has no ill effect beyond increasing the potential backlog.
The Tx ring is wrapped with the END bit, since a large hardware Tx queue
disables the queue layer priority ordering and we have no mechanism to
utilize the hardware two-level priority queue. When modifying the
RX/TX_RING_SIZE pay close attention to page sizes and the ring-empty warning
levels.

IIIb/c. Transmit/Receive Structure

See the Adaptec manual for the many possible structures, and options for
each structure. There are far too many to document all of them here.

For transmit this driver uses type 0/1 transmit descriptors (depending
on the 32/64 bitness of the architecture), and relies on automatic
minimum-length padding. It does not use the completion queue
consumer index, but instead checks for non-zero status entries.

For receive this driver uses type 2/3 receive descriptors. The driver
allocates full frame size skbuffs for the Rx ring buffers, so all frames
should fit in a single descriptor. The driver does not use the completion
queue consumer index, but instead checks for non-zero status entries.

When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff
is allocated and the frame is copied to the new skbuff. When the incoming
frame is larger, the skbuff is passed directly up the protocol stack.
Buffers consumed this way are replaced by newly allocated skbuffs in a later
phase of receive.

A notable aspect of operation is that unaligned buffers are not permitted by
the Starfire hardware. Thus the IP header at offset 14 in an ethernet frame
isn't longword aligned, which may cause problems on some machines,
e.g. Alpha and IA64. For these architectures, the driver is forced to copy
the frame into a new skbuff unconditionally. Copied frames are put into the
skbuff at an offset of "+2", thus 16-byte aligning the IP header.
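
For example, the copy path in __netdev_rx() calls skb_reserve(skb, 2) before
copying, so the 14-byte Ethernet header starts at offset 2 and the IP header
lands at the longword-aligned offset 16.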

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag. The other thread is the interrupt handler, which is single
threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring and the netif_queue
status. If the number of free Tx slots in the ring falls below a certain number
(currently hardcoded to 4), it signals the upper layer to stop the queue.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. Iff the netif_queue is stopped and the
number of free Tx slots is above the threshold, it signals the upper layer to
restart the queue.

IV. Notes

IVb. References

The Adaptec Starfire manuals, available only from Adaptec.
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html

IVc. Errata

- StopOnPerr is broken, don't enable
- Hardware ethernet padding exposes random data, perform software padding
  instead (unverified -- works correctly for all the hardware I have)

*/


enum chip_capability_flags {CanHaveMII=1, };

enum chipset {
        CH_6915 = 0,
};

static struct pci_device_id starfire_pci_tbl[] = {
        { 0x9004, 0x6915, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_6915 },
        { 0, }
};
MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);

/* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */
static const struct chip_info {
        const char *name;
        int drv_flags;
} netdrv_tbl[] __devinitdata = {
        { "Adaptec Starfire 6915", CanHaveMII },
};


/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device. The name can only partially document the semantics and make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
*/
enum register_offsets {
        PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074,
        IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088,
        MIICtrl=0x52000, TxStationAddr=0x50120, EEPROMCtrl=0x51000,
        GPIOCtrl=0x5008C, TxDescCtrl=0x50090,
        TxRingPtr=0x50098, HiPriTxRingPtr=0x50094, /* Low and High priority. */
        TxRingHiAddr=0x5009C, /* 64 bit address extension. */
        TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4,
        TxThreshold=0x500B0,
        CompletionHiAddr=0x500B4, TxCompletionAddr=0x500B8,
        RxCompletionAddr=0x500BC, RxCompletionQ2Addr=0x500C0,
        CompletionQConsumerIdx=0x500C4, RxDMACtrl=0x500D0,
        RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0,
        RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4,
        TxMode=0x55000, VlanType=0x55064,
        PerfFilterTable=0x56000, HashTable=0x56100,
        TxGfpMem=0x58000, RxGfpMem=0x5a000,
};

/*
 * Bits in the interrupt status/mask registers.
 * Warning: setting Intr[Ab]NormalSummary in the IntrEnable register
 * enables all the interrupt sources that are or'ed into those status bits.
 */
enum intr_status_bits {
        IntrLinkChange=0xf0000000, IntrStatsMax=0x08000000,
        IntrAbnormalSummary=0x02000000, IntrGeneralTimer=0x01000000,
        IntrSoftware=0x800000, IntrRxComplQ1Low=0x400000,
        IntrTxComplQLow=0x200000, IntrPCI=0x100000,
        IntrDMAErr=0x080000, IntrTxDataLow=0x040000,
        IntrRxComplQ2Low=0x020000, IntrRxDescQ1Low=0x010000,
        IntrNormalSummary=0x8000, IntrTxDone=0x4000,
        IntrTxDMADone=0x2000, IntrTxEmpty=0x1000,
        IntrEarlyRxQ2=0x0800, IntrEarlyRxQ1=0x0400,
        IntrRxQ2Done=0x0200, IntrRxQ1Done=0x0100,
        IntrRxGFPDead=0x80, IntrRxDescQ2Low=0x40,
        IntrNoTxCsum=0x20, IntrTxBadID=0x10,
        IntrHiPriTxBadID=0x08, IntrRxGfp=0x04,
        IntrTxGfp=0x02, IntrPCIPad=0x01,
        /* not quite bits */
        IntrRxDone=IntrRxQ2Done | IntrRxQ1Done,
        IntrRxEmpty=IntrRxDescQ1Low | IntrRxDescQ2Low,
        IntrNormalMask=0xff00, IntrAbnormalMask=0x3ff00fe,
};

/* Bits in the RxFilterMode register. */
enum rx_mode_bits {
        AcceptBroadcast=0x04, AcceptAllMulticast=0x02, AcceptAll=0x01,
        AcceptMulticast=0x10, PerfectFilter=0x40, HashFilter=0x30,
        PerfectFilterVlan=0x80, MinVLANPrio=0xE000, VlanMode=0x0200,
        WakeupOnGFP=0x0800,
};

/* Bits in the TxMode register */
enum tx_mode_bits {
        MiiSoftReset=0x8000, MIILoopback=0x4000,
        TxFlowEnable=0x0800, RxFlowEnable=0x0400,
        PadEnable=0x04, FullDuplex=0x02, HugeFrame=0x01,
};

/* Bits in the TxDescCtrl register. */
enum tx_ctrl_bits {
        TxDescSpaceUnlim=0x00, TxDescSpace32=0x10, TxDescSpace64=0x20,
        TxDescSpace128=0x30, TxDescSpace256=0x40,
        TxDescType0=0x00, TxDescType1=0x01, TxDescType2=0x02,
        TxDescType3=0x03, TxDescType4=0x04,
        TxNoDMACompletion=0x08,
        TxDescQAddr64bit=0x80, TxDescQAddr32bit=0,
        TxHiPriFIFOThreshShift=24, TxPadLenShift=16,
        TxDMABurstSizeShift=8,
};

/* Bits in the RxDescQCtrl register. */
enum rx_ctrl_bits {
        RxBufferLenShift=16, RxMinDescrThreshShift=0,
        RxPrefetchMode=0x8000, RxVariableQ=0x2000,
        Rx2048QEntries=0x4000, Rx256QEntries=0,
        RxDescAddr64bit=0x1000, RxDescAddr32bit=0,
        RxDescQAddr64bit=0x0100, RxDescQAddr32bit=0,
        RxDescSpace4=0x000, RxDescSpace8=0x100,
        RxDescSpace16=0x200, RxDescSpace32=0x300,
        RxDescSpace64=0x400, RxDescSpace128=0x500,
        RxConsumerWrEn=0x80,
};

/* Bits in the RxDMACtrl register. */
enum rx_dmactrl_bits {
        RxReportBadFrames=0x80000000, RxDMAShortFrames=0x40000000,
        RxDMABadFrames=0x20000000, RxDMACrcErrorFrames=0x10000000,
        RxDMAControlFrame=0x08000000, RxDMAPauseFrame=0x04000000,
        RxChecksumIgnore=0, RxChecksumRejectTCPUDP=0x02000000,
        RxChecksumRejectTCPOnly=0x01000000,
        RxCompletionQ2Enable=0x800000,
        RxDMAQ2Disable=0, RxDMAQ2FPOnly=0x100000,
        RxDMAQ2SmallPkt=0x200000, RxDMAQ2HighPrio=0x300000,
        RxDMAQ2NonIP=0x400000,
        RxUseBackupQueue=0x080000, RxDMACRC=0x040000,
        RxEarlyIntThreshShift=12, RxHighPrioThreshShift=8,
        RxBurstSizeShift=0,
};

/* Bits in the RxCompletionAddr register */
enum rx_compl_bits {
        RxComplQAddr64bit=0x80, RxComplQAddr32bit=0,
        RxComplProducerWrEn=0x40,
        RxComplType0=0x00, RxComplType1=0x10,
        RxComplType2=0x20, RxComplType3=0x30,
        RxComplThreshShift=0,
};

/* Bits in the TxCompletionAddr register */
enum tx_compl_bits {
        TxComplQAddr64bit=0x80, TxComplQAddr32bit=0,
        TxComplProducerWrEn=0x40,
        TxComplIntrStatus=0x20,
        CommonQueueMode=0x10,
        TxComplThreshShift=0,
};

/* Bits in the GenCtrl register */
enum gen_ctrl_bits {
        RxEnable=0x05, TxEnable=0x0a,
        RxGFPEnable=0x10, TxGFPEnable=0x20,
};

/* Bits in the IntrTimerCtrl register */
enum intr_ctrl_bits {
        Timer10X=0x800, EnableIntrMasking=0x60, SmallFrameBypass=0x100,
        SmallFrame64=0, SmallFrame128=0x200, SmallFrame256=0x400, SmallFrame512=0x600,
        IntrLatencyMask=0x1f,
};

/* The Rx and Tx buffer descriptors. */
struct starfire_rx_desc {
        dma_addr_t rxaddr;
};
enum rx_desc_bits {
        RxDescValid=1, RxDescEndRing=2,
};

/* Completion queue entry. */
struct short_rx_done_desc {
        u32 status;                     /* Low 16 bits is length. */
};
struct basic_rx_done_desc {
        u32 status;                     /* Low 16 bits is length. */
        u16 vlanid;
        u16 status2;
};
struct csum_rx_done_desc {
        u32 status;                     /* Low 16 bits is length. */
        u16 csum;                       /* Partial checksum */
        u16 status2;
};
struct full_rx_done_desc {
        u32 status;                     /* Low 16 bits is length. */
        u16 status3;
        u16 status2;
        u16 vlanid;
        u16 csum;                       /* partial checksum */
        u32 timestamp;
};
/* XXX: this is ugly and I'm not sure it's worth the trouble -Ion */
#ifdef VLAN_SUPPORT
typedef struct full_rx_done_desc rx_done_desc;
#define RxComplType RxComplType3
#else  /* not VLAN_SUPPORT */
typedef struct csum_rx_done_desc rx_done_desc;
#define RxComplType RxComplType2
#endif /* not VLAN_SUPPORT */

enum rx_done_bits {
        RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,
};

/* Type 1 Tx descriptor. */
struct starfire_tx_desc_1 {
        u32 status;                     /* Upper bits are status, lower 16 length. */
        u32 addr;
};

/* Type 2 Tx descriptor. */
struct starfire_tx_desc_2 {
        u32 status;                     /* Upper bits are status, lower 16 length. */
        u32 reserved;
        u64 addr;
};

#ifdef ADDR_64BITS
typedef struct starfire_tx_desc_2 starfire_tx_desc;
#define TX_DESC_TYPE TxDescType2
#else  /* not ADDR_64BITS */
typedef struct starfire_tx_desc_1 starfire_tx_desc;
#define TX_DESC_TYPE TxDescType1
#endif /* not ADDR_64BITS */
#define TX_DESC_SPACING TxDescSpaceUnlim

enum tx_desc_bits {
        TxDescID=0xB0000000,
        TxCRCEn=0x01000000, TxDescIntr=0x08000000,
        TxRingWrap=0x04000000, TxCalTCP=0x02000000,
};
struct tx_done_desc {
        u32 status;                     /* timestamp, index. */
#if 0
        u32 intrstatus;                 /* interrupt status */
#endif
};

struct rx_ring_info {
        struct sk_buff *skb;
        dma_addr_t mapping;
};
struct tx_ring_info {
        struct sk_buff *skb;
        dma_addr_t mapping;
        unsigned int used_slots;
};

#define PHY_CNT         2
struct netdev_private {
        /* Descriptor rings first for alignment. */
        struct starfire_rx_desc *rx_ring;
        starfire_tx_desc *tx_ring;
        dma_addr_t rx_ring_dma;
        dma_addr_t tx_ring_dma;
        /* The addresses of rx/tx-in-place skbuffs. */
        struct rx_ring_info rx_info[RX_RING_SIZE];
        struct tx_ring_info tx_info[TX_RING_SIZE];
        /* Pointers to completion queues (full pages). */
        rx_done_desc *rx_done_q;
        dma_addr_t rx_done_q_dma;
        unsigned int rx_done;
        struct tx_done_desc *tx_done_q;
        dma_addr_t tx_done_q_dma;
        unsigned int tx_done;
        struct net_device_stats stats;
        struct pci_dev *pci_dev;
#ifdef VLAN_SUPPORT
        struct vlan_group *vlgrp;
#endif
        void *queue_mem;
        dma_addr_t queue_mem_dma;
        size_t queue_mem_size;

        /* Frequently used values: keep some adjacent for cache effect. */
        spinlock_t lock;
        unsigned int cur_rx, dirty_rx;  /* Producer/consumer ring indices */
        unsigned int cur_tx, dirty_tx, reap_tx;
        unsigned int rx_buf_sz;         /* Based on MTU+slack. */
        /* These values keep track of the transceiver/media in use. */
        int speed100;                   /* Set if speed == 100MBit. */
        u32 tx_mode;
        u32 intr_timer_ctrl;
        u8 tx_threshold;
        /* MII transceiver section. */
        struct mii_if_info mii_if;      /* MII lib hooks/info */
        int phy_cnt;                    /* Number of MII PHYs found. */
        unsigned char phys[PHY_CNT];    /* MII device addresses. */
        void __iomem *base;
};


static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev);
static void tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static int start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
static void netdev_error(struct net_device *dev, int intr_status);
static int __netdev_rx(struct net_device *dev, int *quota);
static void refill_rx_ring(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int netdev_close(struct net_device *dev);
static void netdev_media_change(struct net_device *dev);
static struct ethtool_ops ethtool_ops;


#ifdef VLAN_SUPPORT
static void netdev_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
        struct netdev_private *np = netdev_priv(dev);

        spin_lock(&np->lock);
        if (debug > 2)
                printk("%s: Setting vlgrp to %p\n", dev->name, grp);
        np->vlgrp = grp;
        set_rx_mode(dev);
        spin_unlock(&np->lock);
}

static void netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
        struct netdev_private *np = netdev_priv(dev);

        spin_lock(&np->lock);
        if (debug > 1)
                printk("%s: Adding vlanid %d to vlan filter\n", dev->name, vid);
        set_rx_mode(dev);
        spin_unlock(&np->lock);
}

static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
        struct netdev_private *np = netdev_priv(dev);

        spin_lock(&np->lock);
        if (debug > 1)
                printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid);
        if (np->vlgrp)
                np->vlgrp->vlan_devices[vid] = NULL;
        set_rx_mode(dev);
        spin_unlock(&np->lock);
}
#endif /* VLAN_SUPPORT */


static int __devinit starfire_init_one(struct pci_dev *pdev,
                                       const struct pci_device_id *ent)
{
        struct netdev_private *np;
        int i, irq, option, chip_idx = ent->driver_data;
        struct net_device *dev;
        static int card_idx = -1;
        long ioaddr;
        void __iomem *base;
        int drv_flags, io_size;
        int boguscnt;

        /* when built into the kernel, we only print version if device is found */
#ifndef MODULE
        static int printed_version;
        if (!printed_version++)
                printk(version);
#endif

        card_idx++;

        if (pci_enable_device(pdev))
                return -EIO;

        ioaddr = pci_resource_start(pdev, 0);
        io_size = pci_resource_len(pdev, 0);
        if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) {
                printk(KERN_ERR DRV_NAME " %d: no PCI MEM resources, aborting\n", card_idx);
                return -ENODEV;
        }

        dev = alloc_etherdev(sizeof(*np));
        if (!dev) {
                printk(KERN_ERR DRV_NAME " %d: cannot alloc etherdev, aborting\n", card_idx);
                return -ENOMEM;
        }
        SET_MODULE_OWNER(dev);
        SET_NETDEV_DEV(dev, &pdev->dev);

        irq = pdev->irq;

        if (pci_request_regions(pdev, DRV_NAME)) {
                printk(KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", card_idx);
                goto err_out_free_netdev;
        }

        /* ioremap is broken in Linux-2.2.x/sparc64 */
        base = ioremap(ioaddr, io_size);
        if (!base) {
                printk(KERN_ERR DRV_NAME " %d: cannot remap %#x @ %#lx, aborting\n",
                       card_idx, io_size, ioaddr);
                goto err_out_free_res;
        }

        pci_set_master(pdev);

        /* enable MWI -- it vastly improves Rx performance on sparc64 */
        pci_set_mwi(pdev);

#ifdef ZEROCOPY
        /* Starfire can do TCP/UDP checksumming */
        if (enable_hw_cksum)
                dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
#endif /* ZEROCOPY */
#ifdef VLAN_SUPPORT
        dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
        dev->vlan_rx_register = netdev_vlan_rx_register;
        dev->vlan_rx_add_vid = netdev_vlan_rx_add_vid;
        dev->vlan_rx_kill_vid = netdev_vlan_rx_kill_vid;
#endif /* VLAN_SUPPORT */
#ifdef ADDR_64BITS
        dev->features |= NETIF_F_HIGHDMA;
#endif /* ADDR_64BITS */

        /* Serial EEPROM reads are hidden by the hardware. */
        for (i = 0; i < 6; i++)
                dev->dev_addr[i] = readb(base + EEPROMCtrl + 20 - i);

#if !defined(final_version) /* Dump the EEPROM contents during development. */
        if (debug > 4)
                for (i = 0; i < 0x20; i++)
                        printk("%2.2x%s",
                               (unsigned int)readb(base + EEPROMCtrl + i),
                               i % 16 != 15 ? " " : "\n");
#endif

        /* Issue soft reset */
        writel(MiiSoftReset, base + TxMode);
        udelay(1000);
        writel(0, base + TxMode);

        /* Reset the chip to erase previous misconfiguration. */
        writel(1, base + PCIDeviceConfig);
        boguscnt = 1000;
        while (--boguscnt > 0) {
                udelay(10);
                if ((readl(base + PCIDeviceConfig) & 1) == 0)
                        break;
        }
        if (boguscnt == 0)
                printk("%s: chipset reset never completed!\n", dev->name);
        /* wait a little longer */
        udelay(1000);

        dev->base_addr = (unsigned long)base;
        dev->irq = irq;

        np = netdev_priv(dev);
        np->base = base;
        spin_lock_init(&np->lock);
        pci_set_drvdata(pdev, dev);

        np->pci_dev = pdev;

        np->mii_if.dev = dev;
        np->mii_if.mdio_read = mdio_read;
        np->mii_if.mdio_write = mdio_write;
        np->mii_if.phy_id_mask = 0x1f;
        np->mii_if.reg_num_mask = 0x1f;

        drv_flags = netdrv_tbl[chip_idx].drv_flags;

        option = card_idx < MAX_UNITS ? options[card_idx] : 0;
        if (dev->mem_start)
                option = dev->mem_start;

        /* The lower four bits are the media type. */
        if (option & 0x200)
                np->mii_if.full_duplex = 1;

        if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
                np->mii_if.full_duplex = 1;

        if (np->mii_if.full_duplex)
                np->mii_if.force_media = 1;
        else
                np->mii_if.force_media = 0;
        np->speed100 = 1;

        /* timer resolution is 128 * 0.8us */
        np->intr_timer_ctrl = (((intr_latency * 10) / 1024) & IntrLatencyMask) |
                Timer10X | EnableIntrMasking;
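        /*
         * For example: with the timer tick at 128 * 0.8us = 102.4us,
         * intr_latency = 500 (microseconds) gives (500 * 10) / 1024 = 4
         * ticks, or about 409.6us of actual latency.
         */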

        if (small_frames > 0) {
                np->intr_timer_ctrl |= SmallFrameBypass;
                switch (small_frames) {
                case 1 ... 64:
                        np->intr_timer_ctrl |= SmallFrame64;
                        break;
                case 65 ... 128:
                        np->intr_timer_ctrl |= SmallFrame128;
                        break;
                case 129 ... 256:
                        np->intr_timer_ctrl |= SmallFrame256;
                        break;
                default:
                        np->intr_timer_ctrl |= SmallFrame512;
                        if (small_frames > 512)
                                printk("Adjusting small_frames down to 512\n");
                        break;
                }
        }

        /* The chip-specific entries in the device structure. */
        dev->open = &netdev_open;
        dev->hard_start_xmit = &start_tx;
        dev->tx_timeout = tx_timeout;
        dev->watchdog_timeo = TX_TIMEOUT;
        init_poll(dev);
        dev->stop = &netdev_close;
        dev->get_stats = &get_stats;
        dev->set_multicast_list = &set_rx_mode;
        dev->do_ioctl = &netdev_ioctl;
        SET_ETHTOOL_OPS(dev, &ethtool_ops);

        if (mtu)
                dev->mtu = mtu;

        if (register_netdev(dev))
                goto err_out_cleardev;

        printk(KERN_INFO "%s: %s at %p, ",
               dev->name, netdrv_tbl[chip_idx].name, base);
        for (i = 0; i < 5; i++)
                printk("%2.2x:", dev->dev_addr[i]);
        printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);

        if (drv_flags & CanHaveMII) {
                int phy, phy_idx = 0;
                int mii_status;
                for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) {
                        mdio_write(dev, phy, MII_BMCR, BMCR_RESET);
                        mdelay(100);
                        boguscnt = 1000;
                        while (--boguscnt > 0)
                                if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
                                        break;
                        if (boguscnt == 0) {
                                printk("%s: PHY#%d reset never completed!\n", dev->name, phy);
                                continue;
                        }
                        mii_status = mdio_read(dev, phy, MII_BMSR);
                        if (mii_status != 0) {
                                np->phys[phy_idx++] = phy;
                                np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
                                printk(KERN_INFO "%s: MII PHY found at address %d, status "
                                       "%#4.4x advertising %#4.4x.\n",
                                       dev->name, phy, mii_status, np->mii_if.advertising);
                                /* there can be only one PHY on-board */
                                break;
                        }
                }
                np->phy_cnt = phy_idx;
                if (np->phy_cnt > 0)
                        np->mii_if.phy_id = np->phys[0];
                else
                        memset(&np->mii_if, 0, sizeof(np->mii_if));
        }

        printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming %s.\n",
               dev->name, enable_hw_cksum ? "enabled" : "disabled");
        return 0;

err_out_cleardev:
        pci_set_drvdata(pdev, NULL);
        iounmap(base);
err_out_free_res:
        pci_release_regions(pdev);
err_out_free_netdev:
        free_netdev(dev);
        return -ENODEV;
}


/* Read the MII Management Data I/O (MDIO) interfaces. */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
        int result, boguscnt = 1000;
        /* ??? Should we add a busy-wait here? */
        do {
                result = readl(mdio_addr);
        } while ((result & 0xC0000000) != 0x80000000 && --boguscnt > 0);
        if (boguscnt == 0)
                return 0;
        if ((result & 0xffff) == 0xffff)
                return 0;
        return result & 0xffff;
}


static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
        writel(value, mdio_addr);
        /* The busy-wait will occur before a read. */
}


static int netdev_open(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base;
        int i, retval;
        size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size;

        /* Do we ever need to reset the chip??? */

        retval = request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev);
        if (retval)
                return retval;

        /* Disable the Rx and Tx, and reset the chip. */
        writel(0, ioaddr + GenCtrl);
        writel(1, ioaddr + PCIDeviceConfig);
        if (debug > 1)
                printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
                       dev->name, dev->irq);

        /* Allocate the various queues. */
        if (np->queue_mem == 0) {
                tx_done_q_size = ((sizeof(struct tx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
                rx_done_q_size = ((sizeof(rx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
                tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
                rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE;
                np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
                np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma);
                if (np->queue_mem == NULL) {
                        free_irq(dev->irq, dev);
                        return -ENOMEM;
                }

                np->tx_done_q = np->queue_mem;
                np->tx_done_q_dma = np->queue_mem_dma;
                np->rx_done_q = (void *) np->tx_done_q + tx_done_q_size;
                np->rx_done_q_dma = np->tx_done_q_dma + tx_done_q_size;
                np->tx_ring = (void *) np->rx_done_q + rx_done_q_size;
                np->tx_ring_dma = np->rx_done_q_dma + rx_done_q_size;
                np->rx_ring = (void *) np->tx_ring + tx_ring_size;
                np->rx_ring_dma = np->tx_ring_dma + tx_ring_size;
        }

        /* Start with no carrier, it gets adjusted later */
        netif_carrier_off(dev);
        init_ring(dev);
        /* Set the size of the Rx buffers. */
        writel((np->rx_buf_sz << RxBufferLenShift) |
               (0 << RxMinDescrThreshShift) |
               RxPrefetchMode | RxVariableQ |
               RX_Q_ENTRIES |
               RX_DESC_Q_ADDR_SIZE | RX_DESC_ADDR_SIZE |
               RxDescSpace4,
               ioaddr + RxDescQCtrl);

        /* Set up the Rx DMA controller. */
        writel(RxChecksumIgnore |
               (0 << RxEarlyIntThreshShift) |
               (6 << RxHighPrioThreshShift) |
               ((DMA_BURST_SIZE / 32) << RxBurstSizeShift),
               ioaddr + RxDMACtrl);

        /* Set Tx descriptor */
        writel((2 << TxHiPriFIFOThreshShift) |
               (0 << TxPadLenShift) |
               ((DMA_BURST_SIZE / 32) << TxDMABurstSizeShift) |
               TX_DESC_Q_ADDR_SIZE |
               TX_DESC_SPACING | TX_DESC_TYPE,
               ioaddr + TxDescCtrl);

        writel((np->queue_mem_dma >> 16) >> 16, ioaddr + RxDescQHiAddr);
        writel((np->queue_mem_dma >> 16) >> 16, ioaddr + TxRingHiAddr);
        writel((np->queue_mem_dma >> 16) >> 16, ioaddr + CompletionHiAddr);
        writel(np->rx_ring_dma, ioaddr + RxDescQAddr);
        writel(np->tx_ring_dma, ioaddr + TxRingPtr);

        writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr);
        writel(np->rx_done_q_dma |
               RxComplType |
               (0 << RxComplThreshShift),
               ioaddr + RxCompletionAddr);

        if (debug > 1)
                printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);

        /* Fill both the Tx SA register and the Rx perfect filter. */
        for (i = 0; i < 6; i++)
                writeb(dev->dev_addr[i], ioaddr + TxStationAddr + 5 - i);
        /* The first entry is special because it bypasses the VLAN filter.
           Don't use it. */
        writew(0, ioaddr + PerfFilterTable);
        writew(0, ioaddr + PerfFilterTable + 4);
        writew(0, ioaddr + PerfFilterTable + 8);
        for (i = 1; i < 16; i++) {
                u16 *eaddrs = (u16 *)dev->dev_addr;
                void __iomem *setup_frm = ioaddr + PerfFilterTable + i * 16;
                writew(cpu_to_be16(eaddrs[2]), setup_frm); setup_frm += 4;
                writew(cpu_to_be16(eaddrs[1]), setup_frm); setup_frm += 4;
                writew(cpu_to_be16(eaddrs[0]), setup_frm); setup_frm += 8;
        }

        /* Initialize other registers. */
        /* Configure the PCI bus bursts and FIFO thresholds. */
        np->tx_mode = TxFlowEnable|RxFlowEnable|PadEnable;      /* modified when link is up. */
        writel(MiiSoftReset | np->tx_mode, ioaddr + TxMode);
        udelay(1000);
        writel(np->tx_mode, ioaddr + TxMode);
        np->tx_threshold = 4;
        writel(np->tx_threshold, ioaddr + TxThreshold);

        writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl);

        netif_start_queue(dev);

        if (debug > 1)
                printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
        set_rx_mode(dev);

        np->mii_if.advertising = mdio_read(dev, np->phys[0], MII_ADVERTISE);
        check_duplex(dev);

        /* Enable GPIO interrupts on link change */
        writel(0x0f00ff00, ioaddr + GPIOCtrl);

        /* Set the interrupt mask */
        writel(IntrRxDone | IntrRxEmpty | IntrDMAErr |
               IntrTxDMADone | IntrStatsMax | IntrLinkChange |
               IntrRxGFPDead | IntrNoTxCsum | IntrTxBadID,
               ioaddr + IntrEnable);
        /* Enable PCI interrupts. */
        writel(0x00800000 | readl(ioaddr + PCIDeviceConfig),
               ioaddr + PCIDeviceConfig);

#ifdef VLAN_SUPPORT
        /* Set VLAN type to 802.1q */
        writel(ETH_P_8021Q, ioaddr + VlanType);
#endif /* VLAN_SUPPORT */

        /* Load Rx/Tx firmware into the frame processors */
        for (i = 0; i < FIRMWARE_RX_SIZE * 2; i++)
                writel(firmware_rx[i], ioaddr + RxGfpMem + i * 4);
        for (i = 0; i < FIRMWARE_TX_SIZE * 2; i++)
                writel(firmware_tx[i], ioaddr + TxGfpMem + i * 4);
        if (enable_hw_cksum)
                /* Enable the Rx and Tx units, and the Rx/Tx frame processors. */
                writel(TxEnable|TxGFPEnable|RxEnable|RxGFPEnable, ioaddr + GenCtrl);
        else
                /* Enable the Rx and Tx units only. */
                writel(TxEnable|RxEnable, ioaddr + GenCtrl);

        if (debug > 1)
                printk(KERN_DEBUG "%s: Done netdev_open().\n",
                       dev->name);

        return 0;
}


static void check_duplex(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        u16 reg0;
        int silly_count = 1000;

        mdio_write(dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising);
        mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET);
        udelay(500);
        while (--silly_count && mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET)
                /* do nothing */;
        if (!silly_count) {
                printk("%s: MII reset failed!\n", dev->name);
                return;
        }

        reg0 = mdio_read(dev, np->phys[0], MII_BMCR);

        if (!np->mii_if.force_media) {
                reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
        } else {
                reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
                if (np->speed100)
                        reg0 |= BMCR_SPEED100;
                if (np->mii_if.full_duplex)
                        reg0 |= BMCR_FULLDPLX;
                printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
                       dev->name,
                       np->speed100 ? "100" : "10",
                       np->mii_if.full_duplex ? "full" : "half");
        }
        mdio_write(dev, np->phys[0], MII_BMCR, reg0);
}


static void tx_timeout(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base;
        int old_debug;

        printk(KERN_WARNING "%s: Transmit timed out, status %#8.8x, "
               "resetting...\n", dev->name, (int) readl(ioaddr + IntrStatus));

        /* Perhaps we should reinitialize the hardware here. */

        /*
         * Stop and restart the interface.
         * Cheat and increase the debug level temporarily.
         */
        old_debug = debug;
        debug = 2;
        netdev_close(dev);
        netdev_open(dev);
        debug = old_debug;

        /* Trigger an immediate transmit demand. */

        dev->trans_start = jiffies;
        np->stats.tx_errors++;
        netif_wake_queue(dev);
}


/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        int i;

        np->cur_rx = np->cur_tx = np->reap_tx = 0;
        np->dirty_rx = np->dirty_tx = np->rx_done = np->tx_done = 0;

        np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
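        /* For example: at the default MTU of 1500 this selects the full
           PKT_BUF_SZ (1536-byte) buffer; a larger MTU gets MTU + 32 bytes
           of slack. */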

        /* Fill in the Rx buffers. Handle allocation failure gracefully. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
                np->rx_info[i].skb = skb;
                if (skb == NULL)
                        break;
                np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
                skb->dev = dev;         /* Mark as being used by this device. */
                /* Grrr, we cannot offset to correctly align the IP header. */
                np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
        }
        writew(i - 1, np->base + RxDescQIdx);
        np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

        /* Clear the remainder of the Rx buffer ring. */
        for ( ; i < RX_RING_SIZE; i++) {
                np->rx_ring[i].rxaddr = 0;
                np->rx_info[i].skb = NULL;
                np->rx_info[i].mapping = 0;
        }
        /* Mark the last entry as wrapping the ring. */
        np->rx_ring[RX_RING_SIZE - 1].rxaddr |= cpu_to_dma(RxDescEndRing);

        /* Clear the completion rings. */
        for (i = 0; i < DONE_Q_SIZE; i++) {
                np->rx_done_q[i].status = 0;
                np->tx_done_q[i].status = 0;
        }

        for (i = 0; i < TX_RING_SIZE; i++)
                memset(&np->tx_info[i], 0, sizeof(np->tx_info[i]));

        return;
}


static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        unsigned int entry;
        u32 status;
        int i;

        /*
         * be cautious here, wrapping the queue has weird semantics
         * and we may not have enough slots even when it seems we do.
         */
        if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) {
                netif_stop_queue(dev);
                return 1;
        }

#if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
        if (skb->ip_summed == CHECKSUM_HW) {
                if (skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK))
                        return NETDEV_TX_OK;
        }
#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */

        entry = np->cur_tx % TX_RING_SIZE;
        for (i = 0; i < skb_num_frags(skb); i++) {
                int wrap_ring = 0;
                status = TxDescID;

                if (i == 0) {
                        np->tx_info[entry].skb = skb;
                        status |= TxCRCEn;
                        if (entry >= TX_RING_SIZE - skb_num_frags(skb)) {
                                status |= TxRingWrap;
                                wrap_ring = 1;
                        }
                        if (np->reap_tx) {
                                status |= TxDescIntr;
                                np->reap_tx = 0;
                        }
                        if (skb->ip_summed == CHECKSUM_HW) {
                                status |= TxCalTCP;
                                np->stats.tx_compressed++;
                        }
                        status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16);

                        np->tx_info[entry].mapping =
                                pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE);
                } else {
                        skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
                        status |= this_frag->size;
                        np->tx_info[entry].mapping =
                                pci_map_single(np->pci_dev, page_address(this_frag->page) + this_frag->page_offset, this_frag->size, PCI_DMA_TODEVICE);
                }

                np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
                np->tx_ring[entry].status = cpu_to_le32(status);
                if (debug > 3)
                        printk(KERN_DEBUG "%s: Tx #%d/#%d slot %d status %#8.8x.\n",
                               dev->name, np->cur_tx, np->dirty_tx,
                               entry, status);
                if (wrap_ring) {
                        np->tx_info[entry].used_slots = TX_RING_SIZE - entry;
                        np->cur_tx += np->tx_info[entry].used_slots;
                        entry = 0;
                } else {
                        np->tx_info[entry].used_slots = 1;
                        np->cur_tx += np->tx_info[entry].used_slots;
                        entry++;
                }
                /* scavenge the tx descriptors twice per TX_RING_SIZE */
                if (np->cur_tx % (TX_RING_SIZE / 2) == 0)
                        np->reap_tx = 1;
        }

        /* Non-x86: explicitly flush descriptor cache lines here. */
        /* Ensure all descriptors are written back before the transmit is
           initiated. - Jes */
        wmb();

        /* Update the producer index. */
        writel(entry * (sizeof(starfire_tx_desc) / 8), np->base + TxProducerIdx);
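        /* The producer index is kept in 8-byte units, so the multiplier is
           1 for the 8-byte type-1 descriptors and 2 for the 16-byte type-2
           descriptors. */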

        /* 4 is arbitrary, but should be ok */
        if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
                netif_stop_queue(dev);

        dev->trans_start = jiffies;

        return 0;
}


/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
        struct net_device *dev = dev_instance;
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base;
        int boguscnt = max_interrupt_work;
        int consumer;
        int tx_status;
        int handled = 0;

        do {
                u32 intr_status = readl(ioaddr + IntrClear);

                if (debug > 4)
                        printk(KERN_DEBUG "%s: Interrupt status %#8.8x.\n",
                               dev->name, intr_status);

                if (intr_status == 0 || intr_status == (u32) -1)
                        break;

                handled = 1;

                if (intr_status & (IntrRxDone | IntrRxEmpty))
                        netdev_rx(dev, ioaddr);

                /* Scavenge the skbuff list based on the Tx-done queue.
                   There are redundant checks here that may be cleaned up
                   after the driver has proven to be reliable. */
                consumer = readl(ioaddr + TxConsumerIdx);
                if (debug > 3)
                        printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
                               dev->name, consumer);

                while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) {
                        if (debug > 3)
                                printk(KERN_DEBUG "%s: Tx completion #%d entry %d is %#8.8x.\n",
                                       dev->name, np->dirty_tx, np->tx_done, tx_status);
                        if ((tx_status & 0xe0000000) == 0xa0000000) {
                                np->stats.tx_packets++;
                        } else if ((tx_status & 0xe0000000) == 0x80000000) {
                                u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc);
                                struct sk_buff *skb = np->tx_info[entry].skb;
                                np->tx_info[entry].skb = NULL;
                                pci_unmap_single(np->pci_dev,
                                                 np->tx_info[entry].mapping,
                                                 skb_first_frag_len(skb),
                                                 PCI_DMA_TODEVICE);
                                np->tx_info[entry].mapping = 0;
                                np->dirty_tx += np->tx_info[entry].used_slots;
                                entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
                                {
                                        int i;
                                        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                                                pci_unmap_single(np->pci_dev,
                                                                 np->tx_info[entry].mapping,
                                                                 skb_shinfo(skb)->frags[i].size,
                                                                 PCI_DMA_TODEVICE);
                                                np->dirty_tx++;
                                                entry++;
                                        }
                                }

                                dev_kfree_skb_irq(skb);
                        }
                        np->tx_done_q[np->tx_done].status = 0;
                        np->tx_done = (np->tx_done + 1) % DONE_Q_SIZE;
                }
                writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);
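                /* The Rx and Tx completion consumer indices apparently share
                   one 32-bit register -- Rx in the low half (written in
                   __netdev_rx()), Tx in the high half -- hence the +2 byte
                   offset. */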

                if (netif_queue_stopped(dev) &&
                    (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE)) {
                        /* The ring is no longer full, wake the queue. */
                        netif_wake_queue(dev);
                }

                /* Stats overflow */
                if (intr_status & IntrStatsMax)
                        get_stats(dev);

                /* Media change interrupt. */
                if (intr_status & IntrLinkChange)
                        netdev_media_change(dev);

                /* Abnormal error summary/uncommon events handlers. */
                if (intr_status & IntrAbnormalSummary)
                        netdev_error(dev, intr_status);

                if (--boguscnt < 0) {
                        if (debug > 1)
                                printk(KERN_WARNING "%s: Too much work at interrupt, "
                                       "status=%#8.8x.\n",
                                       dev->name, intr_status);
                        break;
                }
        } while (1);

        if (debug > 4)
                printk(KERN_DEBUG "%s: exiting interrupt, status=%#8.8x.\n",
                       dev->name, (int) readl(ioaddr + IntrStatus));
        return IRQ_RETVAL(handled);
}


/* This routine is logically part of the interrupt/poll handler, but separated
   for clarity, code sharing between NAPI/non-NAPI, and better register allocation. */
static int __netdev_rx(struct net_device *dev, int *quota)
{
        struct netdev_private *np = netdev_priv(dev);
        u32 desc_status;
        int retcode = 0;

        /* If EOP is set on the next entry, it's a new packet. Send it up. */
        while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
                struct sk_buff *skb;
                u16 pkt_len;
                int entry;
                rx_done_desc *desc = &np->rx_done_q[np->rx_done];

                if (debug > 4)
                        printk(KERN_DEBUG "  netdev_rx() status of %d was %#8.8x.\n", np->rx_done, desc_status);
                if (!(desc_status & RxOK)) {
                        /* There was an error. */
                        if (debug > 2)
                                printk(KERN_DEBUG "  netdev_rx() Rx error was %#8.8x.\n", desc_status);
                        np->stats.rx_errors++;
                        if (desc_status & RxFIFOErr)
                                np->stats.rx_fifo_errors++;
                        goto next_rx;
                }

                if (*quota <= 0) {      /* out of rx quota */
                        retcode = 1;
                        goto out;
                }
                (*quota)--;

                pkt_len = desc_status;  /* Implicitly Truncate */
                entry = (desc_status >> 16) & 0x7ff;

                if (debug > 4)
                        printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d, quota %d.\n", pkt_len, *quota);
                /* Check if the packet is long enough to accept without copying
                   to a minimally-sized skbuff. */
                if (pkt_len < rx_copybreak
                    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                        skb->dev = dev;
                        skb_reserve(skb, 2);    /* 16 byte align the IP header */
                        pci_dma_sync_single_for_cpu(np->pci_dev,
                                                    np->rx_info[entry].mapping,
                                                    pkt_len, PCI_DMA_FROMDEVICE);
                        eth_copy_and_sum(skb, np->rx_info[entry].skb->data, pkt_len, 0);
                        pci_dma_sync_single_for_device(np->pci_dev,
                                                       np->rx_info[entry].mapping,
                                                       pkt_len, PCI_DMA_FROMDEVICE);
                        skb_put(skb, pkt_len);
                } else {
                        pci_unmap_single(np->pci_dev, np->rx_info[entry].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
                        skb = np->rx_info[entry].skb;
                        skb_put(skb, pkt_len);
                        np->rx_info[entry].skb = NULL;
                        np->rx_info[entry].mapping = 0;
                }
#ifndef final_version                   /* Remove after testing. */
                /* You will want this info for the initial debug. */
                if (debug > 5)
                        printk(KERN_DEBUG "  Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
                               "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x.\n",
                               skb->data[0], skb->data[1], skb->data[2], skb->data[3],
                               skb->data[4], skb->data[5], skb->data[6], skb->data[7],
                               skb->data[8], skb->data[9], skb->data[10],
                               skb->data[11], skb->data[12], skb->data[13]);
#endif

                skb->protocol = eth_type_trans(skb, dev);
#ifdef VLAN_SUPPORT
                if (debug > 4)
                        printk(KERN_DEBUG "  netdev_rx() status2 of %d was %#4.4x.\n", np->rx_done, le16_to_cpu(desc->status2));
#endif
                if (le16_to_cpu(desc->status2) & 0x0100) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        np->stats.rx_compressed++;
                }
                /*
                 * This feature doesn't seem to be working, at least
                 * with the two firmware versions I have. If the GFP sees
                 * an IP fragment, it either ignores it completely, or reports
                 * "bad checksum" on it.
                 *
                 * Maybe I missed something -- corrections are welcome.
                 * Until then, the printk stays. :-) -Ion
                 */
                else if (le16_to_cpu(desc->status2) & 0x0040) {
                        skb->ip_summed = CHECKSUM_HW;
                        skb->csum = le16_to_cpu(desc->csum);
                        printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2));
                }
#ifdef VLAN_SUPPORT
                if (np->vlgrp && le16_to_cpu(desc->status2) & 0x0200) {
                        if (debug > 4)
                                printk(KERN_DEBUG "  netdev_rx() vlanid = %d\n", le16_to_cpu(desc->vlanid));
                        /* vlan_netdev_receive_skb() expects a packet with the VLAN tag stripped out */
                        vlan_netdev_receive_skb(skb, np->vlgrp, le16_to_cpu(desc->vlanid) & VLAN_VID_MASK);
                } else
#endif /* VLAN_SUPPORT */
                        netdev_receive_skb(skb);
                dev->last_rx = jiffies;
                np->stats.rx_packets++;

        next_rx:
                np->cur_rx++;
                desc->status = 0;
                np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE;
        }
        writew(np->rx_done, np->base + CompletionQConsumerIdx);

out:
        refill_rx_ring(dev);
        if (debug > 5)
                printk(KERN_DEBUG "  exiting netdev_rx(): %d, status of %d was %#8.8x.\n",
                       retcode, np->rx_done, desc_status);
        return retcode;
}
1649
1650
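/*
 * NAPI-style poll routine (built only when HAVE_NETDEV_POLL is defined):
 * acknowledge Rx interrupts, drain the completion queue within the quota
 * the core hands us, and re-enable Rx interrupts only once the queue is
 * empty.  A non-zero return from __netdev_rx() means the quota ran out,
 * so we return with Rx interrupts still masked and expect another poll.
 */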
#ifdef HAVE_NETDEV_POLL
static int netdev_poll(struct net_device *dev, int *budget)
{
	u32 intr_status;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int retcode = 0, quota = dev->quota;

	do {
		writel(IntrRxDone | IntrRxEmpty, ioaddr + IntrClear);

		retcode = __netdev_rx(dev, &quota);
		*budget -= (dev->quota - quota);
		dev->quota = quota;
		if (retcode)
			goto out;

		intr_status = readl(ioaddr + IntrStatus);
	} while (intr_status & (IntrRxDone | IntrRxEmpty));

	netif_rx_complete(dev);
	intr_status = readl(ioaddr + IntrEnable);
	intr_status |= IntrRxDone | IntrRxEmpty;
	writel(intr_status, ioaddr + IntrEnable);

 out:
	if (debug > 5)
		printk(KERN_DEBUG "  exiting netdev_poll(): %d.\n", retcode);

	/* Restart Rx engine if stopped. */
	return retcode;
}
#endif /* HAVE_NETDEV_POLL */


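/*
 * Replenish a receive buffer for every ring slot consumed since the last
 * refill (np->cur_rx - np->dirty_rx slots are outstanding).  A failed
 * allocation leaves a hole (skb == NULL) that a later call retries, and
 * the chip learns about the new buffers through a single producer-index
 * write at the end.
 */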
static void refill_rx_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct sk_buff *skb;
	int entry = -1;

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_info[entry].skb == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_info[entry].skb = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			np->rx_info[entry].mapping =
				pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			skb->dev = dev;	/* Mark as being used by this device. */
			np->rx_ring[entry].rxaddr =
				cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
		}
		if (entry == RX_RING_SIZE - 1)
			np->rx_ring[entry].rxaddr |= cpu_to_dma(RxDescEndRing);
	}
	if (entry >= 0)
		writew(entry, np->base + RxDescQIdx);
}


static void netdev_media_change(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 reg0, reg1, reg4, reg5;
	u32 new_tx_mode;
	u32 new_intr_timer_ctrl;

	/* The link-status bit in BMSR is latched low; read the registers
	   once to flush any stale latched state before sampling the
	   current values below. */
	mdio_read(dev, np->phys[0], MII_BMCR);
	mdio_read(dev, np->phys[0], MII_BMSR);

	reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
	reg1 = mdio_read(dev, np->phys[0], MII_BMSR);

	if (reg1 & BMSR_LSTATUS) {
		/* link is up */
		if (reg0 & BMCR_ANENABLE) {
			/* autonegotiation is enabled */
			reg4 = mdio_read(dev, np->phys[0], MII_ADVERTISE);
			reg5 = mdio_read(dev, np->phys[0], MII_LPA);
			if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
				np->speed100 = 1;
				np->mii_if.full_duplex = 1;
			} else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
				np->speed100 = 1;
				np->mii_if.full_duplex = 0;
			} else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
				np->speed100 = 0;
				np->mii_if.full_duplex = 1;
			} else {
				np->speed100 = 0;
				np->mii_if.full_duplex = 0;
			}
		} else {
			/* autonegotiation is disabled */
			if (reg0 & BMCR_SPEED100)
				np->speed100 = 1;
			else
				np->speed100 = 0;
			if (reg0 & BMCR_FULLDPLX)
				np->mii_if.full_duplex = 1;
			else
				np->mii_if.full_duplex = 0;
		}
		netif_carrier_on(dev);
		printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
		       dev->name,
		       np->speed100 ? "100" : "10",
		       np->mii_if.full_duplex ? "full" : "half");

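		/*
		 * A duplex change apparently takes effect only while
		 * MiiSoftReset is pulsed in TxMode: write the new mode with
		 * the reset bit set, wait, then write it again without it.
		 */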
		new_tx_mode = np->tx_mode & ~FullDuplex;	/* duplex setting */
		if (np->mii_if.full_duplex)
			new_tx_mode |= FullDuplex;
		if (np->tx_mode != new_tx_mode) {
			np->tx_mode = new_tx_mode;
			writel(np->tx_mode | MiiSoftReset, ioaddr + TxMode);
			udelay(1000);
			writel(np->tx_mode, ioaddr + TxMode);
		}

		new_intr_timer_ctrl = np->intr_timer_ctrl & ~Timer10X;
		if (np->speed100)
			new_intr_timer_ctrl |= Timer10X;
		if (np->intr_timer_ctrl != new_intr_timer_ctrl) {
			np->intr_timer_ctrl = new_intr_timer_ctrl;
			writel(new_intr_timer_ctrl, ioaddr + IntrTimerCtrl);
		}
	} else {
		netif_carrier_off(dev);
		printk(KERN_DEBUG "%s: Link is down\n", dev->name);
	}
}


static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);

	/* Came close to underrunning the Tx FIFO, increase threshold. */
	if (intr_status & IntrTxDataLow) {
		if (np->tx_threshold <= PKT_BUF_SZ / 16) {
			writel(++np->tx_threshold, np->base + TxThreshold);
			printk(KERN_NOTICE "%s: PCI bus congestion, increasing Tx FIFO threshold to %d bytes\n",
			       dev->name, np->tx_threshold * 16);
		} else
			printk(KERN_WARNING "%s: PCI Tx underflow -- adapter is probably malfunctioning\n", dev->name);
	}
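	/*
	 * (tx_threshold counts 16-byte units -- hence the "* 16" in the
	 * message above -- and is capped at PKT_BUF_SZ / 16, i.e. one full
	 * packet buffer.)
	 */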
	if (intr_status & IntrRxGFPDead) {
		np->stats.rx_fifo_errors++;
		np->stats.rx_errors++;
	}
	if (intr_status & (IntrNoTxCsum | IntrDMAErr)) {
		np->stats.tx_fifo_errors++;
		np->stats.tx_errors++;
	}
	if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary | IntrLinkChange | IntrStatsMax | IntrTxDataLow | IntrRxGFPDead | IntrNoTxCsum | IntrPCIPad)) && debug)
		printk(KERN_ERR "%s: Something Wicked happened! %#8.8x.\n",
		       dev->name, intr_status);
}


static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;

	/* This adapter architecture needs no SMP locks. */
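	/*
	 * The 0x57xxx offsets below index the chip's hardware statistics
	 * block; they are carried over as-is from the original driver.
	 */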
	np->stats.tx_bytes = readl(ioaddr + 0x57010);
	np->stats.rx_bytes = readl(ioaddr + 0x57044);
	np->stats.tx_packets = readl(ioaddr + 0x57000);
	np->stats.tx_aborted_errors =
		readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
	np->stats.tx_window_errors = readl(ioaddr + 0x57018);
	np->stats.collisions =
		readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);

	/* The chip reports only the frames it silently dropped; fold the
	   count into rx_dropped and clear the hardware counter. */
	np->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
	writew(0, ioaddr + RxDMAStatus);
	np->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
	np->stats.rx_frame_errors = readl(ioaddr + 0x57040);
	np->stats.rx_length_errors = readl(ioaddr + 0x57058);
	np->stats.rx_missed_errors = readl(ioaddr + 0x5707C);

	return &np->stats;
}


static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u32 rx_mode = MinVLANPrio;
	struct dev_mc_list *mclist;
	int i;
#ifdef VLAN_SUPPORT

	rx_mode |= VlanMode;
	if (np->vlgrp) {
		int vlan_count = 0;
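		/*
		 * The hardware provides 32 perfect VLAN filter slots, one
		 * 16-bit VID each, spaced 16 bytes apart.  If more than 32
		 * VLANs are configured the scan stops early and
		 * PerfectFilterVlan is left clear below.
		 */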
		void __iomem *filter_addr = ioaddr + HashTable + 8;
		for (i = 0; i < VLAN_VID_MASK; i++) {
			if (np->vlgrp->vlan_devices[i]) {
				if (vlan_count >= 32)
					break;
				writew(cpu_to_be16(i), filter_addr);
				filter_addr += 16;
				vlan_count++;
			}
		}
		if (i == VLAN_VID_MASK) {
			rx_mode |= PerfectFilterVlan;
			while (vlan_count < 32) {
				writew(0, filter_addr);
				filter_addr += 16;
				vlan_count++;
			}
		}
	}
#endif /* VLAN_SUPPORT */

	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
		rx_mode |= AcceptAll;
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter;
	} else if (dev->mc_count <= 14) {
		/* Use the 16 element perfect filter, skip first two entries. */
		void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
		u16 *eaddrs;
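		/*
		 * Each perfect-filter slot is 16 bytes wide and holds the
		 * address as three 16-bit words at offsets 0, 4 and 8,
		 * byte-swapped via cpu_to_be16() before the register write.
		 * Note that this loop writes the words in the reverse order
		 * of the station-address loop that follows it.
		 */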
		for (i = 2, mclist = dev->mc_list; mclist && i < dev->mc_count + 2;
		     i++, mclist = mclist->next) {
			eaddrs = (u16 *)mclist->dmi_addr;
			writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 4;
			writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4;
			writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 8;
		}
		eaddrs = (u16 *)dev->dev_addr;
		while (i++ < 16) {
			writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 4;
			writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4;
			writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 8;
		}
		rx_mode |= AcceptBroadcast|PerfectFilter;
	} else {
		/* Must use a multicast hash table. */
		void __iomem *filter_addr;
		u16 *eaddrs;
		u16 mc_filter[32] __attribute__ ((aligned(sizeof(long))));	/* Multicast hash filter */

		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			/* The chip uses the upper 9 CRC bits
			   as index into the hash table */
			int bit_nr = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23;
			__u32 *fptr = (__u32 *) &mc_filter[(bit_nr >> 4) & ~1];

			*fptr |= cpu_to_le32(1 << (bit_nr & 31));
		}
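		/*
		 * Worked example of the indexing above: for bit_nr = 341
		 * (0x155), the u16 word index is 341 >> 4 = 21, rounded down
		 * to the even word 20 so that fptr is 32-bit aligned, and the
		 * bit set is 341 & 31 = 21 of that little-endian dword --
		 * i.e. bit 341 of the filter overall.  The 9-bit CRC index
		 * covers exactly the 512 bits of mc_filter[].
		 */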
		/* Clear the perfect filter list, skip first two entries. */
		filter_addr = ioaddr + PerfFilterTable + 2 * 16;
		eaddrs = (u16 *)dev->dev_addr;
		for (i = 2; i < 16; i++) {
			writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 4;
			writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4;
			writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 8;
		}
		for (filter_addr = ioaddr + HashTable, i = 0; i < 32; filter_addr += 16, i++)
			writew(mc_filter[i], filter_addr);
		rx_mode |= AcceptBroadcast|PerfectFilter|HashFilter;
	}
	writel(rx_mode, ioaddr + RxFilterMode);
}

static int check_if_running(struct net_device *dev)
{
	if (!netif_running(dev))
		return -EINVAL;
	return 0;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(np->pci_dev));
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	mii_ethtool_gset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	return 0;
}

static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;
	spin_lock_irq(&np->lock);
	res = mii_ethtool_sset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	check_duplex(dev);
	return res;
}

static int nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii_if);
}

static u32 get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii_if);
}

static u32 get_msglevel(struct net_device *dev)
{
	return debug;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	debug = val;
}

static struct ethtool_ops ethtool_ops = {
	.begin = check_if_running,
	.get_drvinfo = get_drvinfo,
	.get_settings = get_settings,
	.set_settings = set_settings,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
};

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(rq);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
	spin_unlock_irq(&np->lock);

	if ((cmd == SIOCSMIIREG) && (data->phy_id == np->phys[0]))
		check_duplex(dev);

	return rc;
}

static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int i;

	netif_stop_queue(dev);

	if (debug > 1) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %#8.8x.\n",
		       dev->name, (int) readl(ioaddr + IntrStatus));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
		       dev->name, np->cur_tx, np->dirty_tx,
		       np->cur_rx, np->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	writel(0, ioaddr + IntrEnable);

	/* Stop the chip's Tx and Rx processes. */
	writel(0, ioaddr + GenCtrl);
	readl(ioaddr + GenCtrl);
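	/*
	 * (The readl above flushes the posted PCI write, making reasonably
	 * sure the Tx/Rx engines have actually been told to stop before the
	 * rings are torn down.)
	 */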

	if (debug > 5) {
		printk(KERN_DEBUG "  Tx ring at %#llx:\n",
		       (long long) np->tx_ring_dma);
		for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++)
			printk(KERN_DEBUG "  #%d desc. %#8.8x %#llx -> %#8.8x.\n",
			       i, le32_to_cpu(np->tx_ring[i].status),
			       (long long) dma_to_cpu(np->tx_ring[i].addr),
			       le32_to_cpu(np->tx_done_q[i].status));
		printk(KERN_DEBUG "  Rx ring at %#llx -> %p:\n",
		       (long long) np->rx_ring_dma, np->rx_done_q);
		if (np->rx_done_q)
			for (i = 0; i < 8 /* RX_RING_SIZE */; i++) {
				printk(KERN_DEBUG "  #%d desc. %#llx -> %#8.8x\n",
				       i, (long long) dma_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status));
			}
	}

	free_irq(dev->irq, dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0);	/* An invalid address. */
		if (np->rx_info[i].skb != NULL) {
			pci_unmap_single(np->pci_dev, np->rx_info[i].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_info[i].skb);
		}
		np->rx_info[i].skb = NULL;
		np->rx_info[i].mapping = 0;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		struct sk_buff *skb = np->tx_info[i].skb;
		if (skb == NULL)
			continue;
		pci_unmap_single(np->pci_dev,
				 np->tx_info[i].mapping,
				 skb_first_frag_len(skb), PCI_DMA_TODEVICE);
		np->tx_info[i].mapping = 0;
		dev_kfree_skb(skb);
		np->tx_info[i].skb = NULL;
	}

	return 0;
}

#ifdef CONFIG_PM
static int starfire_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (netif_running(dev)) {
		netif_device_detach(dev);
		netdev_close(dev);
	}

	pci_save_state(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int starfire_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	if (netif_running(dev)) {
		netdev_open(dev);
		netif_device_attach(dev);
	}

	return 0;
}
#endif /* CONFIG_PM */


static void __devexit starfire_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct netdev_private *np = netdev_priv(dev);

	BUG_ON(!dev);

	unregister_netdev(dev);

	if (np->queue_mem)
		pci_free_consistent(pdev, np->queue_mem_size, np->queue_mem, np->queue_mem_dma);

	/* XXX: add wakeup code -- requires firmware for MagicPacket */
	pci_set_power_state(pdev, PCI_D3hot);	/* go to sleep in D3 mode */
	pci_disable_device(pdev);

	iounmap(np->base);
	pci_release_regions(pdev);

	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);	/* Will also free np!! */
}


static struct pci_driver starfire_driver = {
	.name		= DRV_NAME,
	.probe		= starfire_init_one,
	.remove		= __devexit_p(starfire_remove_one),
#ifdef CONFIG_PM
	.suspend	= starfire_suspend,
	.resume		= starfire_resume,
#endif /* CONFIG_PM */
	.id_table	= starfire_pci_tbl,
};


static int __init starfire_init (void)
{
	/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#ifdef HAVE_NETDEV_POLL
	printk(KERN_INFO DRV_NAME ": polling (NAPI) enabled\n");
#else
	printk(KERN_INFO DRV_NAME ": polling (NAPI) disabled\n");
#endif
#endif

	/* we can do this test only at run-time... sigh */
	if (sizeof(dma_addr_t) != sizeof(netdrv_addr_t)) {
		printk(KERN_ERR "This driver has dma_addr_t issues, please send email to maintainer\n");
		return -ENODEV;
	}

	return pci_module_init (&starfire_driver);
}


static void __exit starfire_cleanup (void)
{
	pci_unregister_driver (&starfire_driver);
}


module_init(starfire_init);
module_exit(starfire_cleanup);


/*
 * Local variables:
 * c-basic-offset: 8
 * tab-width: 8
 * End:
 */