/* winbond-840.c: A Linux PCI network adapter device driver. */
/*
	Written 1998-2001 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support and updates available at
	http://www.scyld.com/network/drivers.html

	Do not remove the copyright information.
	Do not change the version information unless an improvement has been made.
	Merely removing my name, as Compex has done in the past, does not count
	as an improvement.

	Changelog:
	* ported to 2.4
		???
	* spin lock update, memory barriers, new style dma mappings
		limit each tx buffer to < 1024 bytes
		remove DescIntr from Rx descriptors (that's a Tx flag)
		remove next pointer from Tx descriptors
		synchronize tx_q_bytes
		software reset in tx_timeout
			Copyright (C) 2000 Manfred Spraul
	* further cleanups
		power management.
		support for big endian descriptors
			Copyright (C) 2001 Manfred Spraul
	* ethtool support (jgarzik)
	* Replace some MII-related magic numbers with constants (jgarzik)

	TODO:
	* enable pci_power_off
	* Wake-On-LAN
*/

#define DRV_NAME	"winbond-840"
#define DRV_VERSION	"1.01-d"
#define DRV_RELDATE	"Nov-17-2001"


/* Automatically extracted configuration info:
probe-func: winbond840_probe
config-in: tristate 'Winbond W89c840 Ethernet support' CONFIG_WINBOND_840

c-help-name: Winbond W89c840 PCI Ethernet support
c-help-symbol: CONFIG_WINBOND_840
c-help: This driver is for the Winbond W89c840 chip.  It also works with
c-help: the TX9882 chip on the Compex RL100-ATX board.
c-help: More specific information and updates are available from
c-help: http://www.scyld.com/network/drivers.html
*/

/* The user-configurable values.
   These may be modified when a driver module is loaded.*/

static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The '840 uses a 64 element hash table based on the Ethernet CRC.  */
static int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;

/* Used to pass the media type, etc.
   Both 'options[]' and 'full_duplex[]' should exist for driver
   interoperability.
   The media type is usually passed in 'options[]'.
*/
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	16
#define TX_QUEUE_LEN	10		/* Limit ring entries actually used.  */
#define TX_QUEUE_LEN_RESTART	5
#define RX_RING_SIZE	32

#define TX_BUFLIMIT	(1024-128)

/* The presumed FIFO size for working around the Tx-FIFO-overflow bug.
   To avoid overflowing we don't queue again until we have room for a
   full-size packet.
 */
#define TX_FIFO_SIZE (2048)
#define TX_BUG_FIFO_LIMIT (TX_FIFO_SIZE-1514-16)
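/* Note: with a 2048 byte FIFO and a 1514 byte maximum frame plus 16 bytes
   of slack, TX_BUG_FIFO_LIMIT works out to 518 queued bytes -- at or below
   that limit there is always room for one more full-sized packet. */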
106 | ||
107 | ||
108 | /* Operational parameters that usually are not changed. */ | |
109 | /* Time in jiffies before concluding the transmitter is hung. */ | |
110 | #define TX_TIMEOUT (2*HZ) | |
111 | ||
112 | #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ | |
113 | ||
114 | /* Include files, designed to support most kernel versions 2.0.0 and later. */ | |
115 | #include <linux/module.h> | |
116 | #include <linux/kernel.h> | |
117 | #include <linux/string.h> | |
118 | #include <linux/timer.h> | |
119 | #include <linux/errno.h> | |
120 | #include <linux/ioport.h> | |
121 | #include <linux/slab.h> | |
122 | #include <linux/interrupt.h> | |
123 | #include <linux/pci.h> | |
10a87fcf | 124 | #include <linux/dma-mapping.h> |
1da177e4 LT |
125 | #include <linux/netdevice.h> |
126 | #include <linux/etherdevice.h> | |
127 | #include <linux/skbuff.h> | |
128 | #include <linux/init.h> | |
129 | #include <linux/delay.h> | |
130 | #include <linux/ethtool.h> | |
131 | #include <linux/mii.h> | |
132 | #include <linux/rtnetlink.h> | |
133 | #include <linux/crc32.h> | |
134 | #include <linux/bitops.h> | |
135 | #include <asm/uaccess.h> | |
136 | #include <asm/processor.h> /* Processor type for cache alignment. */ | |
137 | #include <asm/io.h> | |
138 | #include <asm/irq.h> | |
139 | ||
/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE "  Donald Becker <becker@scyld.com>\n"
KERN_INFO "  http://www.scyld.com/network/drivers.html\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(multicast_filter_limit, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(max_interrupt_work, "winbond-840 maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "winbond-840 debug level (0-6)");
MODULE_PARM_DESC(rx_copybreak, "winbond-840 copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(multicast_filter_limit, "winbond-840 maximum number of filtered multicast addresses");
MODULE_PARM_DESC(options, "winbond-840: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "winbond-840 full duplex setting(s) (1)");

/*
				Theory of Operation

I. Board Compatibility

This driver is for the Winbond w89c840 chip.

II. Board-specific settings

None.

III. Driver operation

This chip is very similar to the Digital 21*4* "Tulip" family.  The first
twelve registers and the descriptor format are nearly identical.  Read a
Tulip manual for operational details.

A significant difference is that the multicast filter and station address are
stored in registers rather than loaded through a pseudo-transmit packet.

Unlike the Tulip, transmit buffers are limited to 1KB.  To transmit a
full-sized packet we must use both data buffers in a descriptor.  Thus the
driver uses ring mode where descriptors are implicitly sequential in memory,
rather than using the second descriptor address as a chain pointer to
subsequent descriptors.

IV. Notes

If you are going to almost clone a Tulip, why not go all the way and avoid
the need for a new driver?

IVb. References

http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html
http://www.winbond.com.tw/

IVc. Errata

A horrible bug exists in the transmit FIFO.  Apparently the chip doesn't
correctly detect a full FIFO, and queuing more than 2048 bytes may result in
silent data corruption.

Test with 'ping -s 10000' on a fast computer.

*/


/*
	PCI probe table.
*/
enum chip_capability_flags {
	CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,
};

static const struct pci_device_id w840_pci_tbl[] = {
	{ 0x1050, 0x0840, PCI_ANY_ID, 0x8153,     0, 0, 0 },
	{ 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
	{ }
};
MODULE_DEVICE_TABLE(pci, w840_pci_tbl);

struct pci_id_info {
	const char *name;
	struct match_info {
		int pci, pci_mask, subsystem, subsystem_mask;
		int revision, revision_mask;	/* Only 8 bits. */
	} id;
	int io_size;	/* Needed for I/O region check or ioremap(). */
	int drv_flags;	/* Driver use, intended as capability flags. */
};
static struct pci_id_info pci_id_tbl[] = {
	{"Winbond W89c840",	/* Sometimes a Level-One switch card. */
	 { 0x08401050, 0xffffffff, 0x81530000, 0xffff0000 },
	 128, CanHaveMII | HasBrokenTx | FDXOnNoMII},
	{"Winbond W89c840", { 0x08401050, 0xffffffff, },
	 128, CanHaveMII | HasBrokenTx},
	{"Compex RL100-ATX", { 0x201111F6, 0xffffffff,},
	 128, CanHaveMII | HasBrokenTx},
	{NULL,},	/* 0 terminated list. */
};
246 | ||
247 | /* This driver was written to use PCI memory space, however some x86 systems | |
248 | work only with I/O space accesses. Pass -DUSE_IO_OPS to use PCI I/O space | |
249 | accesses instead of memory space. */ | |
250 | ||
251 | /* Offsets to the Command and Status Registers, "CSRs". | |
252 | While similar to the Tulip, these registers are longword aligned. | |
253 | Note: It's not useful to define symbolic names for every register bit in | |
254 | the device. The name can only partially document the semantics and make | |
255 | the driver longer and more difficult to read. | |
256 | */ | |
257 | enum w840_offsets { | |
258 | PCIBusCfg=0x00, TxStartDemand=0x04, RxStartDemand=0x08, | |
259 | RxRingPtr=0x0C, TxRingPtr=0x10, | |
260 | IntrStatus=0x14, NetworkConfig=0x18, IntrEnable=0x1C, | |
261 | RxMissed=0x20, EECtrl=0x24, MIICtrl=0x24, BootRom=0x28, GPTimer=0x2C, | |
262 | CurRxDescAddr=0x30, CurRxBufAddr=0x34, /* Debug use */ | |
263 | MulticastFilter0=0x38, MulticastFilter1=0x3C, StationAddr=0x40, | |
264 | CurTxDescAddr=0x4C, CurTxBufAddr=0x50, | |
265 | }; | |
266 | ||
267 | /* Bits in the interrupt status/enable registers. */ | |
268 | /* The bits in the Intr Status/Enable registers, mostly interrupt sources. */ | |
269 | enum intr_status_bits { | |
270 | NormalIntr=0x10000, AbnormalIntr=0x8000, | |
271 | IntrPCIErr=0x2000, TimerInt=0x800, | |
272 | IntrRxDied=0x100, RxNoBuf=0x80, IntrRxDone=0x40, | |
273 | TxFIFOUnderflow=0x20, RxErrIntr=0x10, | |
274 | TxIdle=0x04, IntrTxStopped=0x02, IntrTxDone=0x01, | |
275 | }; | |
276 | ||
277 | /* Bits in the NetworkConfig register. */ | |
278 | enum rx_mode_bits { | |
279 | AcceptErr=0x80, AcceptRunt=0x40, | |
280 | AcceptBroadcast=0x20, AcceptMulticast=0x10, | |
281 | AcceptAllPhys=0x08, AcceptMyPhys=0x02, | |
282 | }; | |
283 | ||
284 | enum mii_reg_bits { | |
285 | MDIO_ShiftClk=0x10000, MDIO_DataIn=0x80000, MDIO_DataOut=0x20000, | |
286 | MDIO_EnbOutput=0x40000, MDIO_EnbIn = 0x00000, | |
287 | }; | |
288 | ||
/* The Tulip Rx and Tx buffer descriptors. */
struct w840_rx_desc {
	s32 status;
	s32 length;
	u32 buffer1;
	u32 buffer2;
};

struct w840_tx_desc {
	s32 status;
	s32 length;
	u32 buffer1, buffer2;
};

/* Bits in network_desc.status */
enum desc_status_bits {
	DescOwn=0x80000000, DescEndRing=0x02000000, DescUseLink=0x01000000,
	DescWholePkt=0x60000000, DescStartPkt=0x20000000, DescEndPkt=0x40000000,
	DescIntr=0x80000000,
};
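/* Ownership protocol: the driver hands a descriptor to the chip by setting
   DescOwn (bit 31) last, after every other field has been written and made
   visible with wmb().  Because status is a signed s32, "status < 0" is a
   cheap test for "still owned by the chip". */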
309 | ||
310 | #define MII_CNT 1 /* winbond only supports one MII */ | |
311 | struct netdev_private { | |
312 | struct w840_rx_desc *rx_ring; | |
313 | dma_addr_t rx_addr[RX_RING_SIZE]; | |
314 | struct w840_tx_desc *tx_ring; | |
315 | dma_addr_t tx_addr[TX_RING_SIZE]; | |
316 | dma_addr_t ring_dma_addr; | |
317 | /* The addresses of receive-in-place skbuffs. */ | |
318 | struct sk_buff* rx_skbuff[RX_RING_SIZE]; | |
319 | /* The saved address of a sent-in-place packet/buffer, for later free(). */ | |
320 | struct sk_buff* tx_skbuff[TX_RING_SIZE]; | |
321 | struct net_device_stats stats; | |
322 | struct timer_list timer; /* Media monitoring timer. */ | |
323 | /* Frequently used values: keep some adjacent for cache effect. */ | |
324 | spinlock_t lock; | |
325 | int chip_id, drv_flags; | |
326 | struct pci_dev *pci_dev; | |
327 | int csr6; | |
328 | struct w840_rx_desc *rx_head_desc; | |
329 | unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */ | |
330 | unsigned int rx_buf_sz; /* Based on MTU+slack. */ | |
331 | unsigned int cur_tx, dirty_tx; | |
332 | unsigned int tx_q_bytes; | |
333 | unsigned int tx_full; /* The Tx queue is full. */ | |
334 | /* MII transceiver section. */ | |
335 | int mii_cnt; /* MII device addresses. */ | |
336 | unsigned char phys[MII_CNT]; /* MII device addresses, but only the first is used */ | |
337 | u32 mii; | |
338 | struct mii_if_info mii_if; | |
339 | void __iomem *base_addr; | |
340 | }; | |
341 | ||
342 | static int eeprom_read(void __iomem *ioaddr, int location); | |
343 | static int mdio_read(struct net_device *dev, int phy_id, int location); | |
344 | static void mdio_write(struct net_device *dev, int phy_id, int location, int value); | |
345 | static int netdev_open(struct net_device *dev); | |
346 | static int update_link(struct net_device *dev); | |
347 | static void netdev_timer(unsigned long data); | |
348 | static void init_rxtx_rings(struct net_device *dev); | |
349 | static void free_rxtx_rings(struct netdev_private *np); | |
350 | static void init_registers(struct net_device *dev); | |
351 | static void tx_timeout(struct net_device *dev); | |
352 | static int alloc_ringdesc(struct net_device *dev); | |
353 | static void free_ringdesc(struct netdev_private *np); | |
354 | static int start_tx(struct sk_buff *skb, struct net_device *dev); | |
355 | static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs); | |
356 | static void netdev_error(struct net_device *dev, int intr_status); | |
357 | static int netdev_rx(struct net_device *dev); | |
358 | static u32 __set_rx_mode(struct net_device *dev); | |
359 | static void set_rx_mode(struct net_device *dev); | |
360 | static struct net_device_stats *get_stats(struct net_device *dev); | |
361 | static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | |
362 | static struct ethtool_ops netdev_ethtool_ops; | |
363 | static int netdev_close(struct net_device *dev); | |
364 | ||
f3b197ac | 365 | |
1da177e4 LT |
366 | |
static int __devinit w840_probe1 (struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int find_cnt;
	int chip_idx = ent->driver_data;
	int irq;
	int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
	void __iomem *ioaddr;
	int bar = 1;

	i = pci_enable_device(pdev);
	if (i) return i;

	pci_set_master(pdev);

	irq = pdev->irq;

	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
		printk(KERN_WARNING "Winbond-840: Device %s disabled due to DMA limitations.\n",
		       pci_name(pdev));
		return -EIO;
	}
	dev = alloc_etherdev(sizeof(*np));
	if (!dev)
		return -ENOMEM;
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_request_regions(pdev, DRV_NAME))
		goto err_out_netdev;
#ifdef USE_IO_OPS
	bar = 0;
#endif
	ioaddr = pci_iomap(pdev, bar, pci_id_tbl[chip_idx].io_size);
	if (!ioaddr)
		goto err_out_free_res;

	for (i = 0; i < 3; i++)
		((u16 *)dev->dev_addr)[i] = le16_to_cpu(eeprom_read(ioaddr, i));

	/* Reset the chip to erase previous misconfiguration.
	   No hold time required! */
	iowrite32(0x00000001, ioaddr + PCIBusCfg);

	dev->base_addr = (unsigned long)ioaddr;
	dev->irq = irq;

	np = netdev_priv(dev);
	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
	spin_lock_init(&np->lock);
	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->base_addr = ioaddr;

	pci_set_drvdata(pdev, dev);

	if (dev->mem_start)
		option = dev->mem_start;

	/* The lower four bits are the media type. */
	if (option > 0) {
		if (option & 0x200)
			np->mii_if.full_duplex = 1;
		if (option & 15)
			printk(KERN_INFO "%s: ignoring user supplied media type %d\n",
				dev->name, option & 15);
	}
	if (find_cnt < MAX_UNITS  &&  full_duplex[find_cnt] > 0)
		np->mii_if.full_duplex = 1;

	if (np->mii_if.full_duplex)
		np->mii_if.force_media = 1;

	/* The chip-specific entries in the device structure. */
	dev->open = &netdev_open;
	dev->hard_start_xmit = &start_tx;
	dev->stop = &netdev_close;
	dev->get_stats = &get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &netdev_ioctl;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->tx_timeout = &tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;

	i = register_netdev(dev);
	if (i)
		goto err_out_cleardev;

	printk(KERN_INFO "%s: %s at %p, ",
		   dev->name, pci_id_tbl[chip_idx].name, ioaddr);
	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);

	if (np->drv_flags & CanHaveMII) {
		int phy, phy_idx = 0;
		for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
			int mii_status = mdio_read(dev, phy, MII_BMSR);
			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
				np->phys[phy_idx++] = phy;
				np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
				np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
						mdio_read(dev, phy, MII_PHYSID2);
				printk(KERN_INFO "%s: MII PHY %8.8xh found at address %d, status "
					   "0x%4.4x advertising %4.4x.\n",
					   dev->name, np->mii, phy, mii_status, np->mii_if.advertising);
			}
		}
		np->mii_cnt = phy_idx;
		np->mii_if.phy_id = np->phys[0];
		if (phy_idx == 0) {
			printk(KERN_WARNING "%s: MII PHY not found -- this device may "
				   "not operate correctly.\n", dev->name);
		}
	}

	find_cnt++;
	return 0;

err_out_cleardev:
	pci_set_drvdata(pdev, NULL);
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_netdev:
	free_netdev (dev);
	return -ENODEV;
}


/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.  These are
   often serial bit streams generated by the host processor.
   The example below is for the common 93c46 EEPROM, 64 16 bit words. */

/* Delay between EEPROM clock transitions.
   No extra delay is needed with 33MHz PCI, but future 66MHz access may need
   a delay.  Note that pre-2.0.34 kernels had a cache-alignment bug that
   made udelay() unreliable.
   The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
   deprecated.
*/
#define eeprom_delay(ee_addr)	ioread32(ee_addr)

enum EEPROM_Ctrl_Bits {
	EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805,
	EE_ChipSelect=0x801, EE_DataIn=0x08,
};

/* The EEPROM commands include the always-set leading bit. */
enum EEPROM_Cmds {
	EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
};

static int eeprom_read(void __iomem *addr, int location)
{
	int i;
	int retval = 0;
	void __iomem *ee_addr = addr + EECtrl;
	int read_cmd = location | EE_ReadCmd;
	iowrite32(EE_ChipSelect, ee_addr);

	/* Shift the read command bits out. */
	for (i = 10; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
		iowrite32(dataval, ee_addr);
		eeprom_delay(ee_addr);
		iowrite32(dataval | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
	}
	iowrite32(EE_ChipSelect, ee_addr);
	eeprom_delay(ee_addr);

	for (i = 16; i > 0; i--) {
		iowrite32(EE_ChipSelect | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
		retval = (retval << 1) | ((ioread32(ee_addr) & EE_DataIn) ? 1 : 0);
		iowrite32(EE_ChipSelect, ee_addr);
		eeprom_delay(ee_addr);
	}

	/* Terminate the EEPROM access. */
	iowrite32(0, ee_addr);
	return retval;
}
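/* For reference: the 11 command bits shifted out above form the standard
   93c46 read instruction -- a start bit (1), the read opcode (10), then a
   6 bit word address -- which is why EE_ReadCmd is (6 << 6).  The selected
   16 bit word is then clocked back in MSB first. */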
556 | ||
557 | /* MII transceiver control section. | |
558 | Read and write the MII registers using software-generated serial | |
559 | MDIO protocol. See the MII specifications or DP83840A data sheet | |
560 | for details. | |
561 | ||
562 | The maximum data clock rate is 2.5 Mhz. The minimum timing is usually | |
563 | met by back-to-back 33Mhz PCI cycles. */ | |
564 | #define mdio_delay(mdio_addr) ioread32(mdio_addr) | |
565 | ||
566 | /* Set iff a MII transceiver on any interface requires mdio preamble. | |
567 | This only set with older transceivers, so the extra | |
568 | code size of a per-interface flag is not worthwhile. */ | |
569 | static char mii_preamble_required = 1; | |
570 | ||
571 | #define MDIO_WRITE0 (MDIO_EnbOutput) | |
572 | #define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput) | |
573 | ||
574 | /* Generate the preamble required for initial synchronization and | |
575 | a few older transceivers. */ | |
576 | static void mdio_sync(void __iomem *mdio_addr) | |
577 | { | |
578 | int bits = 32; | |
579 | ||
580 | /* Establish sync by sending at least 32 logic ones. */ | |
581 | while (--bits >= 0) { | |
582 | iowrite32(MDIO_WRITE1, mdio_addr); | |
583 | mdio_delay(mdio_addr); | |
584 | iowrite32(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr); | |
585 | mdio_delay(mdio_addr); | |
586 | } | |
587 | } | |
588 | ||
589 | static int mdio_read(struct net_device *dev, int phy_id, int location) | |
590 | { | |
591 | struct netdev_private *np = netdev_priv(dev); | |
592 | void __iomem *mdio_addr = np->base_addr + MIICtrl; | |
593 | int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location; | |
594 | int i, retval = 0; | |
595 | ||
596 | if (mii_preamble_required) | |
597 | mdio_sync(mdio_addr); | |
598 | ||
599 | /* Shift the read command bits out. */ | |
600 | for (i = 15; i >= 0; i--) { | |
601 | int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0; | |
602 | ||
603 | iowrite32(dataval, mdio_addr); | |
604 | mdio_delay(mdio_addr); | |
605 | iowrite32(dataval | MDIO_ShiftClk, mdio_addr); | |
606 | mdio_delay(mdio_addr); | |
607 | } | |
608 | /* Read the two transition, 16 data, and wire-idle bits. */ | |
609 | for (i = 20; i > 0; i--) { | |
610 | iowrite32(MDIO_EnbIn, mdio_addr); | |
611 | mdio_delay(mdio_addr); | |
612 | retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DataIn) ? 1 : 0); | |
613 | iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr); | |
614 | mdio_delay(mdio_addr); | |
615 | } | |
616 | return (retval>>1) & 0xffff; | |
617 | } | |
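/* For reference: the 16 command bits above are the IEEE 802.3 clause 22
   management read frame -- two leading idle ones, start (01), read opcode
   (10), a 5 bit PHY address and a 5 bit register address -- which is
   exactly what (0xf6 << 10) encodes before the address fields are ORed in. */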
618 | ||
619 | static void mdio_write(struct net_device *dev, int phy_id, int location, int value) | |
620 | { | |
621 | struct netdev_private *np = netdev_priv(dev); | |
622 | void __iomem *mdio_addr = np->base_addr + MIICtrl; | |
623 | int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value; | |
624 | int i; | |
625 | ||
626 | if (location == 4 && phy_id == np->phys[0]) | |
627 | np->mii_if.advertising = value; | |
628 | ||
629 | if (mii_preamble_required) | |
630 | mdio_sync(mdio_addr); | |
631 | ||
632 | /* Shift the command bits out. */ | |
633 | for (i = 31; i >= 0; i--) { | |
634 | int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0; | |
635 | ||
636 | iowrite32(dataval, mdio_addr); | |
637 | mdio_delay(mdio_addr); | |
638 | iowrite32(dataval | MDIO_ShiftClk, mdio_addr); | |
639 | mdio_delay(mdio_addr); | |
640 | } | |
641 | /* Clear out extra bits. */ | |
642 | for (i = 2; i > 0; i--) { | |
643 | iowrite32(MDIO_EnbIn, mdio_addr); | |
644 | mdio_delay(mdio_addr); | |
645 | iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr); | |
646 | mdio_delay(mdio_addr); | |
647 | } | |
648 | return; | |
649 | } | |
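/* Likewise, the 32 bit write frame is start (01), write opcode (01), the
   PHY and register addresses, a turnaround (10), and the 16 data bits --
   the layout that (0x5002 << 16) plus the shifted address fields builds. */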
650 | ||
f3b197ac | 651 | |
1da177e4 LT |
652 | static int netdev_open(struct net_device *dev) |
653 | { | |
654 | struct netdev_private *np = netdev_priv(dev); | |
655 | void __iomem *ioaddr = np->base_addr; | |
656 | int i; | |
657 | ||
658 | iowrite32(0x00000001, ioaddr + PCIBusCfg); /* Reset */ | |
659 | ||
660 | netif_device_detach(dev); | |
1fb9df5d | 661 | i = request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev); |
1da177e4 LT |
662 | if (i) |
663 | goto out_err; | |
664 | ||
665 | if (debug > 1) | |
666 | printk(KERN_DEBUG "%s: w89c840_open() irq %d.\n", | |
667 | dev->name, dev->irq); | |
668 | ||
669 | if((i=alloc_ringdesc(dev))) | |
670 | goto out_err; | |
671 | ||
672 | spin_lock_irq(&np->lock); | |
673 | netif_device_attach(dev); | |
674 | init_registers(dev); | |
675 | spin_unlock_irq(&np->lock); | |
676 | ||
677 | netif_start_queue(dev); | |
678 | if (debug > 2) | |
679 | printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name); | |
680 | ||
681 | /* Set the timer to check for link beat. */ | |
682 | init_timer(&np->timer); | |
683 | np->timer.expires = jiffies + 1*HZ; | |
684 | np->timer.data = (unsigned long)dev; | |
685 | np->timer.function = &netdev_timer; /* timer handler */ | |
686 | add_timer(&np->timer); | |
687 | return 0; | |
688 | out_err: | |
689 | netif_device_attach(dev); | |
690 | return i; | |
691 | } | |
692 | ||
#define MII_DAVICOM_DM9101	0x0181b800

static int update_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int duplex, fasteth, result, mii_reg;

	/* BMSR */
	mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);

	if (mii_reg == 0xffff)
		return np->csr6;
	/* reread: the link status bit is sticky */
	mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
	if (!(mii_reg & 0x4)) {
		if (netif_carrier_ok(dev)) {
			if (debug)
				printk(KERN_INFO "%s: MII #%d reports no link. Disabling watchdog.\n",
					dev->name, np->phys[0]);
			netif_carrier_off(dev);
		}
		return np->csr6;
	}
	if (!netif_carrier_ok(dev)) {
		if (debug)
			printk(KERN_INFO "%s: MII #%d link is back. Enabling watchdog.\n",
				dev->name, np->phys[0]);
		netif_carrier_on(dev);
	}

	if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
		/* If the link partner doesn't support autonegotiation
		 * the MII detects its abilities with "parallel detection".
		 * Some MIIs update the LPA register to the result of the
		 * parallel detection, some don't.
		 * The Davicom PHY [at least 0181b800] doesn't.
		 * Instead bits 8 and 13 of the BMCR are updated to the result
		 * of the negotiation.
		 */
		mii_reg = mdio_read(dev, np->phys[0], MII_BMCR);
		duplex = mii_reg & BMCR_FULLDPLX;
		fasteth = mii_reg & BMCR_SPEED100;
	} else {
		int negotiated;
		mii_reg = mdio_read(dev, np->phys[0], MII_LPA);
		negotiated = mii_reg & np->mii_if.advertising;

		duplex = (negotiated & LPA_100FULL) || ((negotiated & 0x02C0) == LPA_10FULL);
		fasteth = negotiated & 0x380;
	}
	duplex |= np->mii_if.force_media;
	/* remove fastether and fullduplex */
	result = np->csr6 & ~0x20000200;
	if (duplex)
		result |= 0x200;
	if (fasteth)
		result |= 0x20000000;
	if (result != np->csr6 && debug)
		printk(KERN_INFO "%s: Setting %dMBit-%s-duplex based on MII#%d\n",
			dev->name, fasteth ? 100 : 10,
			duplex ? "full" : "half", np->phys[0]);
	return result;
}
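/* In the csr6 value built above, bit 9 (0x200) selects full duplex and bit
   29 (0x20000000) selects 100 Mbit operation; update_csr6() below idles the
   Tx and Rx processes before rewriting either bit. */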
756 | ||
757 | #define RXTX_TIMEOUT 2000 | |
758 | static inline void update_csr6(struct net_device *dev, int new) | |
759 | { | |
760 | struct netdev_private *np = netdev_priv(dev); | |
761 | void __iomem *ioaddr = np->base_addr; | |
762 | int limit = RXTX_TIMEOUT; | |
763 | ||
764 | if (!netif_device_present(dev)) | |
765 | new = 0; | |
766 | if (new==np->csr6) | |
767 | return; | |
768 | /* stop both Tx and Rx processes */ | |
769 | iowrite32(np->csr6 & ~0x2002, ioaddr + NetworkConfig); | |
770 | /* wait until they have really stopped */ | |
771 | for (;;) { | |
772 | int csr5 = ioread32(ioaddr + IntrStatus); | |
773 | int t; | |
774 | ||
775 | t = (csr5 >> 17) & 0x07; | |
776 | if (t==0||t==1) { | |
777 | /* rx stopped */ | |
778 | t = (csr5 >> 20) & 0x07; | |
779 | if (t==0||t==1) | |
780 | break; | |
781 | } | |
782 | ||
783 | limit--; | |
784 | if(!limit) { | |
785 | printk(KERN_INFO "%s: couldn't stop rxtx, IntrStatus %xh.\n", | |
786 | dev->name, csr5); | |
787 | break; | |
788 | } | |
789 | udelay(1); | |
790 | } | |
791 | np->csr6 = new; | |
792 | /* and restart them with the new configuration */ | |
793 | iowrite32(np->csr6, ioaddr + NetworkConfig); | |
794 | if (new & 0x200) | |
795 | np->mii_if.full_duplex = 1; | |
796 | } | |
797 | ||
798 | static void netdev_timer(unsigned long data) | |
799 | { | |
800 | struct net_device *dev = (struct net_device *)data; | |
801 | struct netdev_private *np = netdev_priv(dev); | |
802 | void __iomem *ioaddr = np->base_addr; | |
803 | ||
804 | if (debug > 2) | |
805 | printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x " | |
806 | "config %8.8x.\n", | |
807 | dev->name, ioread32(ioaddr + IntrStatus), | |
808 | ioread32(ioaddr + NetworkConfig)); | |
809 | spin_lock_irq(&np->lock); | |
810 | update_csr6(dev, update_link(dev)); | |
811 | spin_unlock_irq(&np->lock); | |
812 | np->timer.expires = jiffies + 10*HZ; | |
813 | add_timer(&np->timer); | |
814 | } | |
815 | ||
816 | static void init_rxtx_rings(struct net_device *dev) | |
817 | { | |
818 | struct netdev_private *np = netdev_priv(dev); | |
819 | int i; | |
820 | ||
821 | np->rx_head_desc = &np->rx_ring[0]; | |
822 | np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE]; | |
823 | ||
824 | /* Initial all Rx descriptors. */ | |
825 | for (i = 0; i < RX_RING_SIZE; i++) { | |
826 | np->rx_ring[i].length = np->rx_buf_sz; | |
827 | np->rx_ring[i].status = 0; | |
828 | np->rx_skbuff[i] = NULL; | |
829 | } | |
830 | /* Mark the last entry as wrapping the ring. */ | |
831 | np->rx_ring[i-1].length |= DescEndRing; | |
832 | ||
833 | /* Fill in the Rx buffers. Handle allocation failure gracefully. */ | |
834 | for (i = 0; i < RX_RING_SIZE; i++) { | |
835 | struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz); | |
836 | np->rx_skbuff[i] = skb; | |
837 | if (skb == NULL) | |
838 | break; | |
839 | skb->dev = dev; /* Mark as being used by this device. */ | |
689be439 | 840 | np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data, |
bb02aacc | 841 | np->rx_buf_sz,PCI_DMA_FROMDEVICE); |
1da177e4 LT |
842 | |
843 | np->rx_ring[i].buffer1 = np->rx_addr[i]; | |
844 | np->rx_ring[i].status = DescOwn; | |
845 | } | |
846 | ||
847 | np->cur_rx = 0; | |
848 | np->dirty_rx = (unsigned int)(i - RX_RING_SIZE); | |
849 | ||
850 | /* Initialize the Tx descriptors */ | |
851 | for (i = 0; i < TX_RING_SIZE; i++) { | |
852 | np->tx_skbuff[i] = NULL; | |
853 | np->tx_ring[i].status = 0; | |
854 | } | |
855 | np->tx_full = 0; | |
856 | np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0; | |
857 | ||
858 | iowrite32(np->ring_dma_addr, np->base_addr + RxRingPtr); | |
859 | iowrite32(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE, | |
860 | np->base_addr + TxRingPtr); | |
861 | ||
862 | } | |
863 | ||
864 | static void free_rxtx_rings(struct netdev_private* np) | |
865 | { | |
866 | int i; | |
867 | /* Free all the skbuffs in the Rx queue. */ | |
868 | for (i = 0; i < RX_RING_SIZE; i++) { | |
869 | np->rx_ring[i].status = 0; | |
870 | if (np->rx_skbuff[i]) { | |
871 | pci_unmap_single(np->pci_dev, | |
872 | np->rx_addr[i], | |
873 | np->rx_skbuff[i]->len, | |
874 | PCI_DMA_FROMDEVICE); | |
875 | dev_kfree_skb(np->rx_skbuff[i]); | |
876 | } | |
877 | np->rx_skbuff[i] = NULL; | |
878 | } | |
879 | for (i = 0; i < TX_RING_SIZE; i++) { | |
880 | if (np->tx_skbuff[i]) { | |
881 | pci_unmap_single(np->pci_dev, | |
882 | np->tx_addr[i], | |
883 | np->tx_skbuff[i]->len, | |
884 | PCI_DMA_TODEVICE); | |
885 | dev_kfree_skb(np->tx_skbuff[i]); | |
886 | } | |
887 | np->tx_skbuff[i] = NULL; | |
888 | } | |
889 | } | |
890 | ||
891 | static void init_registers(struct net_device *dev) | |
892 | { | |
893 | struct netdev_private *np = netdev_priv(dev); | |
894 | void __iomem *ioaddr = np->base_addr; | |
895 | int i; | |
896 | ||
897 | for (i = 0; i < 6; i++) | |
898 | iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i); | |
899 | ||
900 | /* Initialize other registers. */ | |
901 | #ifdef __BIG_ENDIAN | |
902 | i = (1<<20); /* Big-endian descriptors */ | |
903 | #else | |
904 | i = 0; | |
905 | #endif | |
906 | i |= (0x04<<2); /* skip length 4 u32 */ | |
907 | i |= 0x02; /* give Rx priority */ | |
908 | ||
909 | /* Configure the PCI bus bursts and FIFO thresholds. | |
910 | 486: Set 8 longword cache alignment, 8 longword burst. | |
911 | 586: Set 16 longword cache alignment, no burst limit. | |
912 | Cache alignment bits 15:14 Burst length 13:8 | |
913 | 0000 <not allowed> 0000 align to cache 0800 8 longwords | |
914 | 4000 8 longwords 0100 1 longword 1000 16 longwords | |
915 | 8000 16 longwords 0200 2 longwords 2000 32 longwords | |
916 | C000 32 longwords 0400 4 longwords */ | |
917 | ||
918 | #if defined (__i386__) && !defined(MODULE) | |
919 | /* When not a module we can work around broken '486 PCI boards. */ | |
920 | if (boot_cpu_data.x86 <= 4) { | |
921 | i |= 0x4800; | |
922 | printk(KERN_INFO "%s: This is a 386/486 PCI system, setting cache " | |
923 | "alignment to 8 longwords.\n", dev->name); | |
924 | } else { | |
925 | i |= 0xE000; | |
926 | } | |
927 | #elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__) | |
928 | i |= 0xE000; | |
929 | #elif defined(__sparc__) | |
930 | i |= 0x4800; | |
931 | #else | |
932 | #warning Processor architecture undefined | |
933 | i |= 0x4800; | |
934 | #endif | |
935 | iowrite32(i, ioaddr + PCIBusCfg); | |
936 | ||
937 | np->csr6 = 0; | |
f3b197ac | 938 | /* 128 byte Tx threshold; |
1da177e4 LT |
939 | Transmit on; Receive on; */ |
940 | update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev)); | |
941 | ||
942 | /* Clear and Enable interrupts by setting the interrupt mask. */ | |
943 | iowrite32(0x1A0F5, ioaddr + IntrStatus); | |
944 | iowrite32(0x1A0F5, ioaddr + IntrEnable); | |
945 | ||
946 | iowrite32(0, ioaddr + RxStartDemand); | |
947 | } | |
948 | ||
949 | static void tx_timeout(struct net_device *dev) | |
950 | { | |
951 | struct netdev_private *np = netdev_priv(dev); | |
952 | void __iomem *ioaddr = np->base_addr; | |
953 | ||
954 | printk(KERN_WARNING "%s: Transmit timed out, status %8.8x," | |
955 | " resetting...\n", dev->name, ioread32(ioaddr + IntrStatus)); | |
956 | ||
957 | { | |
958 | int i; | |
959 | printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring); | |
960 | for (i = 0; i < RX_RING_SIZE; i++) | |
961 | printk(" %8.8x", (unsigned int)np->rx_ring[i].status); | |
962 | printk("\n"KERN_DEBUG" Tx ring %p: ", np->tx_ring); | |
963 | for (i = 0; i < TX_RING_SIZE; i++) | |
964 | printk(" %8.8x", np->tx_ring[i].status); | |
965 | printk("\n"); | |
966 | } | |
967 | printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d.\n", | |
968 | np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes); | |
969 | printk(KERN_DEBUG "Tx Descriptor addr %xh.\n",ioread32(ioaddr+0x4C)); | |
970 | ||
971 | disable_irq(dev->irq); | |
972 | spin_lock_irq(&np->lock); | |
973 | /* | |
974 | * Under high load dirty_tx and the internal tx descriptor pointer | |
975 | * come out of sync, thus perform a software reset and reinitialize | |
976 | * everything. | |
977 | */ | |
978 | ||
979 | iowrite32(1, np->base_addr+PCIBusCfg); | |
980 | udelay(1); | |
981 | ||
982 | free_rxtx_rings(np); | |
983 | init_rxtx_rings(dev); | |
984 | init_registers(dev); | |
985 | spin_unlock_irq(&np->lock); | |
986 | enable_irq(dev->irq); | |
987 | ||
988 | netif_wake_queue(dev); | |
989 | dev->trans_start = jiffies; | |
990 | np->stats.tx_errors++; | |
991 | return; | |
992 | } | |
993 | ||
994 | /* Initialize the Rx and Tx rings, along with various 'dev' bits. */ | |
995 | static int alloc_ringdesc(struct net_device *dev) | |
996 | { | |
997 | struct netdev_private *np = netdev_priv(dev); | |
998 | ||
999 | np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); | |
1000 | ||
1001 | np->rx_ring = pci_alloc_consistent(np->pci_dev, | |
1002 | sizeof(struct w840_rx_desc)*RX_RING_SIZE + | |
1003 | sizeof(struct w840_tx_desc)*TX_RING_SIZE, | |
1004 | &np->ring_dma_addr); | |
1005 | if(!np->rx_ring) | |
1006 | return -ENOMEM; | |
1007 | init_rxtx_rings(dev); | |
1008 | return 0; | |
1009 | } | |
1010 | ||
1011 | static void free_ringdesc(struct netdev_private *np) | |
1012 | { | |
1013 | pci_free_consistent(np->pci_dev, | |
1014 | sizeof(struct w840_rx_desc)*RX_RING_SIZE + | |
1015 | sizeof(struct w840_tx_desc)*TX_RING_SIZE, | |
1016 | np->rx_ring, np->ring_dma_addr); | |
1017 | ||
1018 | } | |
1019 | ||
static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_addr[entry] = pci_map_single(np->pci_dev,
				skb->data, skb->len, PCI_DMA_TODEVICE);
	np->tx_skbuff[entry] = skb;

	np->tx_ring[entry].buffer1 = np->tx_addr[entry];
	if (skb->len < TX_BUFLIMIT) {
		np->tx_ring[entry].length = DescWholePkt | skb->len;
	} else {
		int len = skb->len - TX_BUFLIMIT;

		np->tx_ring[entry].buffer2 = np->tx_addr[entry]+TX_BUFLIMIT;
		np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT;
	}
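	/* The length word packs both fragments: bits 10..0 carry the buffer1
	   byte count and bits 21..11 the buffer2 byte count, hence the
	   second fragment's length being shifted left by 11 above. */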
	if(entry == TX_RING_SIZE-1)
		np->tx_ring[entry].length |= DescEndRing;

	/* Now acquire the irq spinlock.
	 * The difficult race is the ordering between
	 * increasing np->cur_tx and setting DescOwn:
	 * - if np->cur_tx is increased first the interrupt
	 *   handler could consider the packet as transmitted
	 *   since DescOwn is cleared.
	 * - If DescOwn is set first the NIC could report the
	 *   packet as sent, but the interrupt handler would ignore it
	 *   since the np->cur_tx was not yet increased.
	 */
	spin_lock_irq(&np->lock);
	np->cur_tx++;

	wmb(); /* flush length, buffer1, buffer2 */
	np->tx_ring[entry].status = DescOwn;
	wmb(); /* flush status and kick the hardware */
	iowrite32(0, np->base_addr + TxStartDemand);
	np->tx_q_bytes += skb->len;
	/* Work around horrible bug in the chip by marking the queue as full
	   when we do not have FIFO room for a maximum sized packet. */
	if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
		((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) {
		netif_stop_queue(dev);
		wmb();
		np->tx_full = 1;
	}
	spin_unlock_irq(&np->lock);

	dev->trans_start = jiffies;

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
			   dev->name, np->cur_tx, entry);
	}
	return 0;
}
1083 | ||
1084 | static void netdev_tx_done(struct net_device *dev) | |
1085 | { | |
1086 | struct netdev_private *np = netdev_priv(dev); | |
1087 | for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) { | |
1088 | int entry = np->dirty_tx % TX_RING_SIZE; | |
1089 | int tx_status = np->tx_ring[entry].status; | |
1090 | ||
1091 | if (tx_status < 0) | |
1092 | break; | |
1093 | if (tx_status & 0x8000) { /* There was an error, log it. */ | |
1094 | #ifndef final_version | |
1095 | if (debug > 1) | |
1096 | printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n", | |
1097 | dev->name, tx_status); | |
1098 | #endif | |
1099 | np->stats.tx_errors++; | |
1100 | if (tx_status & 0x0104) np->stats.tx_aborted_errors++; | |
1101 | if (tx_status & 0x0C80) np->stats.tx_carrier_errors++; | |
1102 | if (tx_status & 0x0200) np->stats.tx_window_errors++; | |
1103 | if (tx_status & 0x0002) np->stats.tx_fifo_errors++; | |
1104 | if ((tx_status & 0x0080) && np->mii_if.full_duplex == 0) | |
1105 | np->stats.tx_heartbeat_errors++; | |
1106 | } else { | |
1107 | #ifndef final_version | |
1108 | if (debug > 3) | |
1109 | printk(KERN_DEBUG "%s: Transmit slot %d ok, Tx status %8.8x.\n", | |
1110 | dev->name, entry, tx_status); | |
1111 | #endif | |
1112 | np->stats.tx_bytes += np->tx_skbuff[entry]->len; | |
1113 | np->stats.collisions += (tx_status >> 3) & 15; | |
1114 | np->stats.tx_packets++; | |
1115 | } | |
1116 | /* Free the original skb. */ | |
1117 | pci_unmap_single(np->pci_dev,np->tx_addr[entry], | |
1118 | np->tx_skbuff[entry]->len, | |
1119 | PCI_DMA_TODEVICE); | |
1120 | np->tx_q_bytes -= np->tx_skbuff[entry]->len; | |
1121 | dev_kfree_skb_irq(np->tx_skbuff[entry]); | |
1122 | np->tx_skbuff[entry] = NULL; | |
1123 | } | |
1124 | if (np->tx_full && | |
1125 | np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART && | |
1126 | np->tx_q_bytes < TX_BUG_FIFO_LIMIT) { | |
1127 | /* The ring is no longer full, clear tbusy. */ | |
1128 | np->tx_full = 0; | |
1129 | wmb(); | |
1130 | netif_wake_queue(dev); | |
1131 | } | |
1132 | } | |
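/* Note the hysteresis: the queue is stopped once more than TX_QUEUE_LEN (10)
   entries are outstanding but only woken again below TX_QUEUE_LEN_RESTART
   (5), so a nearly-full ring does not bounce the queue on and off per packet. */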
1133 | ||
1134 | /* The interrupt handler does all of the Rx thread work and cleans up | |
1135 | after the Tx thread. */ | |
1136 | static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs) | |
1137 | { | |
1138 | struct net_device *dev = (struct net_device *)dev_instance; | |
1139 | struct netdev_private *np = netdev_priv(dev); | |
1140 | void __iomem *ioaddr = np->base_addr; | |
1141 | int work_limit = max_interrupt_work; | |
1142 | int handled = 0; | |
1143 | ||
1144 | if (!netif_device_present(dev)) | |
1145 | return IRQ_NONE; | |
1146 | do { | |
1147 | u32 intr_status = ioread32(ioaddr + IntrStatus); | |
1148 | ||
1149 | /* Acknowledge all of the current interrupt sources ASAP. */ | |
1150 | iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus); | |
1151 | ||
1152 | if (debug > 4) | |
1153 | printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n", | |
1154 | dev->name, intr_status); | |
1155 | ||
1156 | if ((intr_status & (NormalIntr|AbnormalIntr)) == 0) | |
1157 | break; | |
1158 | ||
1159 | handled = 1; | |
1160 | ||
1161 | if (intr_status & (IntrRxDone | RxNoBuf)) | |
1162 | netdev_rx(dev); | |
1163 | if (intr_status & RxNoBuf) | |
1164 | iowrite32(0, ioaddr + RxStartDemand); | |
1165 | ||
1166 | if (intr_status & (TxIdle | IntrTxDone) && | |
1167 | np->cur_tx != np->dirty_tx) { | |
1168 | spin_lock(&np->lock); | |
1169 | netdev_tx_done(dev); | |
1170 | spin_unlock(&np->lock); | |
1171 | } | |
1172 | ||
1173 | /* Abnormal error summary/uncommon events handlers. */ | |
1174 | if (intr_status & (AbnormalIntr | TxFIFOUnderflow | IntrPCIErr | | |
1175 | TimerInt | IntrTxStopped)) | |
1176 | netdev_error(dev, intr_status); | |
1177 | ||
1178 | if (--work_limit < 0) { | |
1179 | printk(KERN_WARNING "%s: Too much work at interrupt, " | |
1180 | "status=0x%4.4x.\n", dev->name, intr_status); | |
1181 | /* Set the timer to re-enable the other interrupts after | |
1182 | 10*82usec ticks. */ | |
1183 | spin_lock(&np->lock); | |
1184 | if (netif_device_present(dev)) { | |
1185 | iowrite32(AbnormalIntr | TimerInt, ioaddr + IntrEnable); | |
1186 | iowrite32(10, ioaddr + GPTimer); | |
1187 | } | |
1188 | spin_unlock(&np->lock); | |
1189 | break; | |
1190 | } | |
1191 | } while (1); | |
1192 | ||
1193 | if (debug > 3) | |
1194 | printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n", | |
1195 | dev->name, ioread32(ioaddr + IntrStatus)); | |
1196 | return IRQ_RETVAL(handled); | |
1197 | } | |
1198 | ||
1199 | /* This routine is logically part of the interrupt handler, but separated | |
1200 | for clarity and better register allocation. */ | |
1201 | static int netdev_rx(struct net_device *dev) | |
1202 | { | |
1203 | struct netdev_private *np = netdev_priv(dev); | |
1204 | int entry = np->cur_rx % RX_RING_SIZE; | |
1205 | int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx; | |
1206 | ||
1207 | if (debug > 4) { | |
1208 | printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n", | |
1209 | entry, np->rx_ring[entry].status); | |
1210 | } | |
1211 | ||
1212 | /* If EOP is set on the next entry, it's a new packet. Send it up. */ | |
1213 | while (--work_limit >= 0) { | |
1214 | struct w840_rx_desc *desc = np->rx_head_desc; | |
1215 | s32 status = desc->status; | |
1216 | ||
1217 | if (debug > 4) | |
1218 | printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n", | |
1219 | status); | |
1220 | if (status < 0) | |
1221 | break; | |
1222 | if ((status & 0x38008300) != 0x0300) { | |
1223 | if ((status & 0x38000300) != 0x0300) { | |
1224 | /* Ingore earlier buffers. */ | |
1225 | if ((status & 0xffff) != 0x7fff) { | |
1226 | printk(KERN_WARNING "%s: Oversized Ethernet frame spanned " | |
1227 | "multiple buffers, entry %#x status %4.4x!\n", | |
1228 | dev->name, np->cur_rx, status); | |
1229 | np->stats.rx_length_errors++; | |
1230 | } | |
1231 | } else if (status & 0x8000) { | |
1232 | /* There was a fatal error. */ | |
1233 | if (debug > 2) | |
1234 | printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n", | |
1235 | dev->name, status); | |
1236 | np->stats.rx_errors++; /* end of a packet.*/ | |
1237 | if (status & 0x0890) np->stats.rx_length_errors++; | |
1238 | if (status & 0x004C) np->stats.rx_frame_errors++; | |
1239 | if (status & 0x0002) np->stats.rx_crc_errors++; | |
1240 | } | |
1241 | } else { | |
1242 | struct sk_buff *skb; | |
1243 | /* Omit the four octet CRC from the length. */ | |
1244 | int pkt_len = ((status >> 16) & 0x7ff) - 4; | |
1245 | ||
1246 | #ifndef final_version | |
1247 | if (debug > 4) | |
1248 | printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d" | |
1249 | " status %x.\n", pkt_len, status); | |
1250 | #endif | |
1251 | /* Check if the packet is long enough to accept without copying | |
1252 | to a minimally-sized skbuff. */ | |
1253 | if (pkt_len < rx_copybreak | |
1254 | && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { | |
1255 | skb->dev = dev; | |
1256 | skb_reserve(skb, 2); /* 16 byte align the IP header */ | |
1257 | pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry], | |
1258 | np->rx_skbuff[entry]->len, | |
1259 | PCI_DMA_FROMDEVICE); | |
689be439 | 1260 | eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0); |
1da177e4 LT |
1261 | skb_put(skb, pkt_len); |
1262 | pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry], | |
1263 | np->rx_skbuff[entry]->len, | |
1264 | PCI_DMA_FROMDEVICE); | |
1265 | } else { | |
1266 | pci_unmap_single(np->pci_dev,np->rx_addr[entry], | |
1267 | np->rx_skbuff[entry]->len, | |
1268 | PCI_DMA_FROMDEVICE); | |
1269 | skb_put(skb = np->rx_skbuff[entry], pkt_len); | |
1270 | np->rx_skbuff[entry] = NULL; | |
1271 | } | |
1272 | #ifndef final_version /* Remove after testing. */ | |
1273 | /* You will want this info for the initial debug. */ | |
1274 | if (debug > 5) | |
1275 | printk(KERN_DEBUG " Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:" | |
1276 | "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x " | |
1277 | "%d.%d.%d.%d.\n", | |
1278 | skb->data[0], skb->data[1], skb->data[2], skb->data[3], | |
1279 | skb->data[4], skb->data[5], skb->data[6], skb->data[7], | |
1280 | skb->data[8], skb->data[9], skb->data[10], | |
1281 | skb->data[11], skb->data[12], skb->data[13], | |
1282 | skb->data[14], skb->data[15], skb->data[16], | |
1283 | skb->data[17]); | |
1284 | #endif | |
1285 | skb->protocol = eth_type_trans(skb, dev); | |
1286 | netif_rx(skb); | |
1287 | dev->last_rx = jiffies; | |
1288 | np->stats.rx_packets++; | |
1289 | np->stats.rx_bytes += pkt_len; | |
1290 | } | |
1291 | entry = (++np->cur_rx) % RX_RING_SIZE; | |
1292 | np->rx_head_desc = &np->rx_ring[entry]; | |
1293 | } | |
1294 | ||
1295 | /* Refill the Rx ring buffers. */ | |
1296 | for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) { | |
1297 | struct sk_buff *skb; | |
1298 | entry = np->dirty_rx % RX_RING_SIZE; | |
1299 | if (np->rx_skbuff[entry] == NULL) { | |
1300 | skb = dev_alloc_skb(np->rx_buf_sz); | |
1301 | np->rx_skbuff[entry] = skb; | |
1302 | if (skb == NULL) | |
1303 | break; /* Better luck next round. */ | |
1304 | skb->dev = dev; /* Mark as being used by this device. */ | |
1305 | np->rx_addr[entry] = pci_map_single(np->pci_dev, | |
689be439 | 1306 | skb->data, |
bb02aacc | 1307 | np->rx_buf_sz, PCI_DMA_FROMDEVICE); |
1da177e4 LT |
1308 | np->rx_ring[entry].buffer1 = np->rx_addr[entry]; |
1309 | } | |
1310 | wmb(); | |
1311 | np->rx_ring[entry].status = DescOwn; | |
1312 | } | |
1313 | ||
1314 | return 0; | |
1315 | } | |
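/* The rx_copybreak scheme above: frames shorter than rx_copybreak are
   copied into a freshly allocated small skb so the original full-sized,
   DMA-mapped buffer stays in the ring; longer frames are passed up in
   place and their ring slot is refilled with a new buffer afterwards. */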
1316 | ||
1317 | static void netdev_error(struct net_device *dev, int intr_status) | |
1318 | { | |
1319 | struct netdev_private *np = netdev_priv(dev); | |
1320 | void __iomem *ioaddr = np->base_addr; | |
1321 | ||
1322 | if (debug > 2) | |
1323 | printk(KERN_DEBUG "%s: Abnormal event, %8.8x.\n", | |
1324 | dev->name, intr_status); | |
1325 | if (intr_status == 0xffffffff) | |
1326 | return; | |
1327 | spin_lock(&np->lock); | |
1328 | if (intr_status & TxFIFOUnderflow) { | |
1329 | int new; | |
1330 | /* Bump up the Tx threshold */ | |
1331 | #if 0 | |
1332 | /* This causes lots of dropped packets, | |
1333 | * and under high load even tx_timeouts | |
1334 | */ | |
1335 | new = np->csr6 + 0x4000; | |
1336 | #else | |
1337 | new = (np->csr6 >> 14)&0x7f; | |
1338 | if (new < 64) | |
1339 | new *= 2; | |
1340 | else | |
1341 | new = 127; /* load full packet before starting */ | |
1342 | new = (np->csr6 & ~(0x7F << 14)) | (new<<14); | |
1343 | #endif | |
1344 | printk(KERN_DEBUG "%s: Tx underflow, new csr6 %8.8x.\n", | |
1345 | dev->name, new); | |
1346 | update_csr6(dev, new); | |
1347 | } | |
1348 | if (intr_status & IntrRxDied) { /* Missed a Rx frame. */ | |
1349 | np->stats.rx_errors++; | |
1350 | } | |
1351 | if (intr_status & TimerInt) { | |
1352 | /* Re-enable other interrupts. */ | |
1353 | if (netif_device_present(dev)) | |
1354 | iowrite32(0x1A0F5, ioaddr + IntrEnable); | |
1355 | } | |
1356 | np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff; | |
1357 | iowrite32(0, ioaddr + RxStartDemand); | |
1358 | spin_unlock(&np->lock); | |
1359 | } | |
1360 | ||
1361 | static struct net_device_stats *get_stats(struct net_device *dev) | |
1362 | { | |
1363 | struct netdev_private *np = netdev_priv(dev); | |
1364 | void __iomem *ioaddr = np->base_addr; | |
1365 | ||
1366 | /* The chip only need report frame silently dropped. */ | |
1367 | spin_lock_irq(&np->lock); | |
1368 | if (netif_running(dev) && netif_device_present(dev)) | |
1369 | np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff; | |
1370 | spin_unlock_irq(&np->lock); | |
1371 | ||
1372 | return &np->stats; | |
1373 | } | |
1374 | ||
1375 | ||
1376 | static u32 __set_rx_mode(struct net_device *dev) | |
1377 | { | |
1378 | struct netdev_private *np = netdev_priv(dev); | |
1379 | void __iomem *ioaddr = np->base_addr; | |
1380 | u32 mc_filter[2]; /* Multicast hash filter */ | |
1381 | u32 rx_mode; | |
1382 | ||
1383 | if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ | |
1384 | /* Unconditionally log net taps. */ | |
1385 | printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name); | |
1386 | memset(mc_filter, 0xff, sizeof(mc_filter)); | |
1387 | rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAllPhys | |
1388 | | AcceptMyPhys; | |
1389 | } else if ((dev->mc_count > multicast_filter_limit) | |
1390 | || (dev->flags & IFF_ALLMULTI)) { | |
1391 | /* Too many to match, or accept all multicasts. */ | |
1392 | memset(mc_filter, 0xff, sizeof(mc_filter)); | |
1393 | rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; | |
1394 | } else { | |
1395 | struct dev_mc_list *mclist; | |
1396 | int i; | |
1397 | memset(mc_filter, 0, sizeof(mc_filter)); | |
1398 | for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; | |
1399 | i++, mclist = mclist->next) { | |
1400 | int filterbit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F; | |
1401 | filterbit &= 0x3f; | |
1402 | mc_filter[filterbit >> 5] |= 1 << (filterbit & 31); | |
1403 | } | |
1404 | rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; | |
1405 | } | |
1406 | iowrite32(mc_filter[0], ioaddr + MulticastFilter0); | |
1407 | iowrite32(mc_filter[1], ioaddr + MulticastFilter1); | |
1408 | return rx_mode; | |
1409 | } | |
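/* The 64 bit hash above is indexed by the complemented top six bits of the
   Ethernet CRC of each address: "filterbit >> 5" picks one of the two 32 bit
   filter registers and "filterbit & 31" the bit within it.  A hash hit only
   means "possibly subscribed"; exact filtering is left to the stack. */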
1410 | ||
1411 | static void set_rx_mode(struct net_device *dev) | |
1412 | { | |
1413 | struct netdev_private *np = netdev_priv(dev); | |
1414 | u32 rx_mode = __set_rx_mode(dev); | |
1415 | spin_lock_irq(&np->lock); | |
1416 | update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode); | |
1417 | spin_unlock_irq(&np->lock); | |
1418 | } | |
1419 | ||
1420 | static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) | |
1421 | { | |
1422 | struct netdev_private *np = netdev_priv(dev); | |
1423 | ||
1424 | strcpy (info->driver, DRV_NAME); | |
1425 | strcpy (info->version, DRV_VERSION); | |
1426 | strcpy (info->bus_info, pci_name(np->pci_dev)); | |
1427 | } | |
1428 | ||
1429 | static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |
1430 | { | |
1431 | struct netdev_private *np = netdev_priv(dev); | |
1432 | int rc; | |
1433 | ||
1434 | spin_lock_irq(&np->lock); | |
1435 | rc = mii_ethtool_gset(&np->mii_if, cmd); | |
1436 | spin_unlock_irq(&np->lock); | |
1437 | ||
1438 | return rc; | |
1439 | } | |
1440 | ||
1441 | static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |
1442 | { | |
1443 | struct netdev_private *np = netdev_priv(dev); | |
1444 | int rc; | |
1445 | ||
1446 | spin_lock_irq(&np->lock); | |
1447 | rc = mii_ethtool_sset(&np->mii_if, cmd); | |
1448 | spin_unlock_irq(&np->lock); | |
1449 | ||
1450 | return rc; | |
1451 | } | |
1452 | ||
1453 | static int netdev_nway_reset(struct net_device *dev) | |
1454 | { | |
1455 | struct netdev_private *np = netdev_priv(dev); | |
1456 | return mii_nway_restart(&np->mii_if); | |
1457 | } | |
1458 | ||
1459 | static u32 netdev_get_link(struct net_device *dev) | |
1460 | { | |
1461 | struct netdev_private *np = netdev_priv(dev); | |
1462 | return mii_link_ok(&np->mii_if); | |
1463 | } | |
1464 | ||
1465 | static u32 netdev_get_msglevel(struct net_device *dev) | |
1466 | { | |
1467 | return debug; | |
1468 | } | |
1469 | ||
1470 | static void netdev_set_msglevel(struct net_device *dev, u32 value) | |
1471 | { | |
1472 | debug = value; | |
1473 | } | |
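/* Note: msglevel is backed by the module-wide 'debug' parameter,
 * so setting it on one interface affects every '840 in the box. */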
1474 | ||
1475 | static struct ethtool_ops netdev_ethtool_ops = { | |
1476 | .get_drvinfo = netdev_get_drvinfo, | |
1477 | .get_settings = netdev_get_settings, | |
1478 | .set_settings = netdev_set_settings, | |
1479 | .nway_reset = netdev_nway_reset, | |
1480 | .get_link = netdev_get_link, | |
1481 | .get_msglevel = netdev_get_msglevel, | |
1482 | .set_msglevel = netdev_set_msglevel, | |
1483 | .get_sg = ethtool_op_get_sg, | |
1484 | .get_tx_csum = ethtool_op_get_tx_csum, | |
1485 | }; | |
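/* The '840 has no hardware checksum or scatter/gather support, so
 * the generic ethtool_op_get_* helpers above just report the
 * (unset) feature flags. */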
1486 | ||
1487 | static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |
1488 | { | |
1489 | struct mii_ioctl_data *data = if_mii(rq); | |
1490 | struct netdev_private *np = netdev_priv(dev); | |
1491 | ||
1492 | switch(cmd) { | |
1493 | case SIOCGMIIPHY: /* Get address of MII PHY in use. */ | |
1494 | data->phy_id = np->phys[0] & 0x1f; | 
1495 | /* Fall Through */ | |
1496 | ||
1497 | case SIOCGMIIREG: /* Read MII PHY register. */ | |
1498 | spin_lock_irq(&np->lock); | |
1499 | data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f); | |
1500 | spin_unlock_irq(&np->lock); | |
1501 | return 0; | |
1502 | ||
1503 | case SIOCSMIIREG: /* Write MII PHY register. */ | |
1504 | if (!capable(CAP_NET_ADMIN)) | |
1505 | return -EPERM; | |
1506 | spin_lock_irq(&np->lock); | |
1507 | mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in); | |
1508 | spin_unlock_irq(&np->lock); | |
1509 | return 0; | |
1510 | default: | |
1511 | return -EOPNOTSUPP; | |
1512 | } | |
1513 | } | |
1514 | ||
1515 | static int netdev_close(struct net_device *dev) | |
1516 | { | |
1517 | struct netdev_private *np = netdev_priv(dev); | |
1518 | void __iomem *ioaddr = np->base_addr; | |
1519 | ||
1520 | netif_stop_queue(dev); | |
1521 | ||
1522 | if (debug > 1) { | |
1523 | printk(KERN_DEBUG "%s: Shutting down ethercard, status was %8.8x " | |
1524 | "Config %8.8x.\n", dev->name, ioread32(ioaddr + IntrStatus), | |
1525 | ioread32(ioaddr + NetworkConfig)); | |
1526 | printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n", | |
1527 | dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx); | |
1528 | } | |
1529 | ||
1530 | /* Stop the chip's Tx and Rx processes. */ | |
1531 | spin_lock_irq(&np->lock); | |
1532 | netif_device_detach(dev); | |
1533 | update_csr6(dev, 0); | |
1534 | iowrite32(0x0000, ioaddr + IntrEnable); | |
1535 | spin_unlock_irq(&np->lock); | |
1536 | ||
1537 | free_irq(dev->irq, dev); | |
1538 | wmb(); | |
1539 | netif_device_attach(dev); | |
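/* The detach/attach pair brackets free_irq(): a detached device
 * makes any late interrupt handler bail out without touching the
 * hardware, and re-attaching afterwards lets the register reads
 * below proceed. */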
1540 | ||
1541 | if (ioread32(ioaddr + NetworkConfig) != 0xffffffff) | |
1542 | np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff; | |
1543 | ||
1544 | #ifdef __i386__ | |
1545 | if (debug > 2) { | |
1546 | int i; | |
1547 | ||
1548 | printk(KERN_DEBUG" Tx ring at %8.8x:\n", | |
1549 | (int)np->tx_ring); | |
1550 | for (i = 0; i < TX_RING_SIZE; i++) | |
1551 | printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x.\n", | |
1552 | i, np->tx_ring[i].length, | |
1553 | np->tx_ring[i].status, np->tx_ring[i].buffer1); | |
1554 | printk("\n"KERN_DEBUG " Rx ring %8.8x:\n", | |
1555 | (int)np->rx_ring); | |
1556 | for (i = 0; i < RX_RING_SIZE; i++) { | |
1557 | printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n", | |
1558 | i, np->rx_ring[i].length, | |
1559 | np->rx_ring[i].status, np->rx_ring[i].buffer1); | |
1560 | } | |
1561 | } | |
1562 | #endif /* __i386__ debugging only */ | |
1563 | ||
1564 | del_timer_sync(&np->timer); | |
1565 | ||
1566 | free_rxtx_rings(np); | |
1567 | free_ringdesc(np); | |
1568 | ||
1569 | return 0; | |
1570 | } | |
1571 | ||
1572 | static void __devexit w840_remove1 (struct pci_dev *pdev) | |
1573 | { | |
1574 | struct net_device *dev = pci_get_drvdata(pdev); | |
f3b197ac | 1575 | |
1da177e4 LT |
1576 | if (dev) { |
1577 | struct netdev_private *np = netdev_priv(dev); | |
1578 | unregister_netdev(dev); | |
1579 | pci_release_regions(pdev); | |
1580 | pci_iounmap(pdev, np->base_addr); | |
1581 | free_netdev(dev); | |
1582 | } | |
1583 | ||
1584 | pci_set_drvdata(pdev, NULL); | |
1585 | } | |
1586 | ||
1587 | #ifdef CONFIG_PM | |
1588 | ||
1589 | /* | |
1590 | * suspend/resume synchronization: | |
1591 | * - open, close, do_ioctl: | |
1592 | * serialized by rtnl_lock; netif_device_detach may occur after the rtnl_unlock. | 
1593 | * - get_stats: | |
1594 | * spin_lock_irq(np->lock), doesn't touch hw if not present | |
1595 | * - hard_start_xmit: | |
932ff279 | 1596 | * synchronize_irq + netif_tx_disable; |
1da177e4 | 1597 | * - tx_timeout: |
932ff279 | 1598 | * netif_device_detach + netif_tx_disable; |
1da177e4 | 1599 | * - set_multicast_list |
932ff279 | 1600 | * netif_device_detach + netif_tx_disable; |
1da177e4 LT |
1601 | * - interrupt handler |
1602 | * doesn't touch hw if not present, synchronize_irq waits for | |
1603 | * running instances of the interrupt handler. | |
1604 | * | |
1605 | * Disabling hw requires clearing csr6 & IntrEnable. | |
1606 | * update_csr6 & all functions that write IntrEnable check netif_device_present | 
1607 | * before setting any bits. | 
1608 | * | |
1609 | * Detach must occur under spin_lock_irq(); interrupts from a detached | 
1610 | * device would cause an irq storm. | |
1611 | */ | |
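/*
 * For reference, the disable sequence that the paths above share
 * boils down to this (a sketch, not literal driver code):
 *
 *	spin_lock_irq(&np->lock);
 *	netif_device_detach(dev);		-- netif_device_present() -> 0
 *	update_csr6(dev, 0);			-- stop the Tx/Rx engines
 *	iowrite32(0, ioaddr + IntrEnable);	-- no new interrupts
 *	spin_unlock_irq(&np->lock);
 *	synchronize_irq(dev->irq);		-- wait out running handlers
 *	netif_tx_disable(dev);
 */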
05adc3b7 | 1612 | static int w840_suspend (struct pci_dev *pdev, pm_message_t state) |
1da177e4 LT |
1613 | { |
1614 | struct net_device *dev = pci_get_drvdata (pdev); | |
1615 | struct netdev_private *np = netdev_priv(dev); | |
1616 | void __iomem *ioaddr = np->base_addr; | |
1617 | ||
1618 | rtnl_lock(); | |
1619 | if (netif_running (dev)) { | |
1620 | del_timer_sync(&np->timer); | |
1621 | ||
1622 | spin_lock_irq(&np->lock); | |
1623 | netif_device_detach(dev); | |
1624 | update_csr6(dev, 0); | |
1625 | iowrite32(0, ioaddr + IntrEnable); | |
1da177e4 LT |
1626 | spin_unlock_irq(&np->lock); |
1627 | ||
1da177e4 | 1628 | synchronize_irq(dev->irq); |
932ff279 | 1629 | netif_tx_disable(dev); |
1da177e4 LT |
1630 | |
1631 | np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff; | |
1632 | ||
1633 | /* no more hardware accesses behind this line. */ | |
1634 | ||
cca4aa83 | 1635 | BUG_ON(np->csr6); |
1da177e4 LT |
1636 | BUG_ON(ioread32(ioaddr + IntrEnable)); | 
1637 | ||
1638 | /* pci_power_off(pdev, -1); */ | |
1639 | ||
1640 | free_rxtx_rings(np); | |
1641 | } else { | |
1642 | netif_device_detach(dev); | |
1643 | } | |
1644 | rtnl_unlock(); | |
1645 | return 0; | |
1646 | } | |
1647 | ||
1648 | static int w840_resume (struct pci_dev *pdev) | |
1649 | { | |
1650 | struct net_device *dev = pci_get_drvdata (pdev); | |
1651 | struct netdev_private *np = netdev_priv(dev); | |
1652 | ||
1653 | rtnl_lock(); | |
1654 | if (netif_device_present(dev)) | |
1655 | goto out; /* device not suspended */ | |
1656 | if (netif_running(dev)) { | |
1657 | pci_enable_device(pdev); | |
1658 | /* pci_power_on(pdev); */ | |
1659 | ||
1660 | spin_lock_irq(&np->lock); | |
1661 | iowrite32(1, np->base_addr+PCIBusCfg); | |
1662 | ioread32(np->base_addr+PCIBusCfg); | |
1663 | udelay(1); | |
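/* (Writing 1 to PCIBusCfg issues a software reset of the chip; the
 * read-back plus a microsecond delay lets it settle before the
 * rings and registers are reprogrammed.) */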
1664 | netif_device_attach(dev); | |
1665 | init_rxtx_rings(dev); | |
1666 | init_registers(dev); | |
1667 | spin_unlock_irq(&np->lock); | |
1668 | ||
1669 | netif_wake_queue(dev); | |
1670 | ||
1671 | mod_timer(&np->timer, jiffies + 1*HZ); | |
1672 | } else { | |
1673 | netif_device_attach(dev); | |
1674 | } | |
1675 | out: | |
1676 | rtnl_unlock(); | |
1677 | return 0; | |
1678 | } | |
1679 | #endif | |
1680 | ||
1681 | static struct pci_driver w840_driver = { | |
1682 | .name = DRV_NAME, | |
1683 | .id_table = w840_pci_tbl, | |
1684 | .probe = w840_probe1, | |
1685 | .remove = __devexit_p(w840_remove1), | |
1686 | #ifdef CONFIG_PM | |
1687 | .suspend = w840_suspend, | |
1688 | .resume = w840_resume, | |
1689 | #endif | |
1690 | }; | |
1691 | ||
1692 | static int __init w840_init(void) | |
1693 | { | |
1694 | printk(version); | |
1695 | return pci_module_init(&w840_driver); | |
1696 | } | |
1697 | ||
1698 | static void __exit w840_exit(void) | |
1699 | { | |
1700 | pci_unregister_driver(&w840_driver); | |
1701 | } | |
1702 | ||
1703 | module_init(w840_init); | |
1704 | module_exit(w840_exit); |