Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik...
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / net / typhoon.c
1 /* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */
2 /*
3 Written 2002-2004 by David Dillow <dave@thedillows.org>
4 Based on code written 1998-2000 by Donald Becker <becker@scyld.com> and
5 Linux 2.2.x driver by David P. McLean <davidpmclean@yahoo.com>.
6
7 This software may be used and distributed according to the terms of
8 the GNU General Public License (GPL), incorporated herein by reference.
9 Drivers based on or derived from this code fall under the GPL and must
10 retain the authorship, copyright and license notice. This file is not
11 a complete program and may only be used when the entire operating
12 system is licensed under the GPL.
13
14 This software is available on a public web site. It may enable
15 cryptographic capabilities of the 3Com hardware, and may be
16 exported from the United States under License Exception "TSU"
17 pursuant to 15 C.F.R. Section 740.13(e).
18
19 This work was funded by the National Library of Medicine under
20 the Department of Energy project number 0274DD06D1 and NLM project
21 number Y1-LM-2015-01.
22
23 This driver is designed for the 3Com 3CR990 Family of cards with the
24 3XP Processor. It has been tested on x86 and sparc64.
25
26 KNOWN ISSUES:
27 *) The current firmware always strips the VLAN tag off, even if
28 we tell it not to. You should filter VLANs at the switch
29 as a workaround (good practice in any event) until we can
30 get this fixed.
31 *) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
32 issue. Hopefully 3Com will fix it.
33 *) Waiting for a command response takes 8ms due to non-preemptable
34 polling. Only significant for getting stats and creating
35 SAs, but an ugly wart never the less.
36
37 TODO:
38 *) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming.
39 *) Add more support for ethtool (especially for NIC stats)
40 *) Allow disabling of RX checksum offloading
41 *) Fix MAC changing to work while the interface is up
42 (Need to put commands on the TX ring, which changes
43 the locking)
44 *) Add in FCS to {rx,tx}_bytes, since the hardware doesn't. See
45 http://oss.sgi.com/cgi-bin/mesg.cgi?a=netdev&i=20031215152211.7003fe8e.rddunlap%40osdl.org
46 */
47
48 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
49 * Setting to > 1518 effectively disables this feature.
50 */
51 static int rx_copybreak = 200;
52
53 /* Should we use MMIO or Port IO?
54 * 0: Port IO
55 * 1: MMIO
56 * 2: Try MMIO, fallback to Port IO
57 */
58 static unsigned int use_mmio = 2;
59
60 /* end user-configurable values */
61
62 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
63 */
64 static const int multicast_filter_limit = 32;
65
66 /* Operational parameters that are set at compile time. */
67
68 /* Keep the ring sizes a power of two for compile efficiency.
69 * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
70 * Making the Tx ring too large decreases the effectiveness of channel
71 * bonding and packet priority.
72 * There are no ill effects from too-large receive rings.
73 *
74 * We don't currently use the Hi Tx ring so, don't make it very big.
75 *
76 * Beware that if we start using the Hi Tx ring, we will need to change
77 * typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
78 */
79 #define TXHI_ENTRIES 2
80 #define TXLO_ENTRIES 128
81 #define RX_ENTRIES 32
82 #define COMMAND_ENTRIES 16
83 #define RESPONSE_ENTRIES 32
84
85 #define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc))
86 #define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc))
87
88 /* The 3XP will preload and remove 64 entries from the free buffer
89 * list, and we need one entry to keep the ring from wrapping, so
90 * to keep this a power of two, we use 128 entries.
91 */
92 #define RXFREE_ENTRIES 128
93 #define RXENT_ENTRIES (RXFREE_ENTRIES - 1)
94
95 /* Operational parameters that usually are not changed. */
96
97 /* Time in jiffies before concluding the transmitter is hung. */
98 #define TX_TIMEOUT (2*HZ)
99
100 #define PKT_BUF_SZ 1536
101
102 #define DRV_MODULE_NAME "typhoon"
103 #define DRV_MODULE_VERSION "1.5.8"
104 #define DRV_MODULE_RELDATE "06/11/09"
105 #define PFX DRV_MODULE_NAME ": "
106 #define ERR_PFX KERN_ERR PFX
107
108 #include <linux/module.h>
109 #include <linux/kernel.h>
110 #include <linux/string.h>
111 #include <linux/timer.h>
112 #include <linux/errno.h>
113 #include <linux/ioport.h>
114 #include <linux/slab.h>
115 #include <linux/interrupt.h>
116 #include <linux/pci.h>
117 #include <linux/netdevice.h>
118 #include <linux/etherdevice.h>
119 #include <linux/skbuff.h>
120 #include <linux/init.h>
121 #include <linux/delay.h>
122 #include <linux/ethtool.h>
123 #include <linux/if_vlan.h>
124 #include <linux/crc32.h>
125 #include <linux/bitops.h>
126 #include <asm/processor.h>
127 #include <asm/io.h>
128 #include <asm/uaccess.h>
129 #include <linux/in6.h>
130 #include <asm/checksum.h>
131 #include <linux/version.h>
132 #include <linux/dma-mapping.h>
133
134 #include "typhoon.h"
135 #include "typhoon-firmware.h"
136
137 static const char version[] __devinitdata =
138 "typhoon.c: version " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
139
140 MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
141 MODULE_VERSION(DRV_MODULE_VERSION);
142 MODULE_LICENSE("GPL");
143 MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
144 MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and "
145 "the buffer given back to the NIC. Default "
146 "is 200.");
147 MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
148 "Default is to try MMIO and fallback to PIO.");
149 module_param(rx_copybreak, int, 0);
150 module_param(use_mmio, int, 0);
151
152 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
153 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
154 #undef NETIF_F_TSO
155 #endif
156
157 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
158 #error TX ring too small!
159 #endif
160
161 struct typhoon_card_info {
162 char *name;
163 int capabilities;
164 };
165
166 #define TYPHOON_CRYPTO_NONE 0x00
167 #define TYPHOON_CRYPTO_DES 0x01
168 #define TYPHOON_CRYPTO_3DES 0x02
169 #define TYPHOON_CRYPTO_VARIABLE 0x04
170 #define TYPHOON_FIBER 0x08
171 #define TYPHOON_WAKEUP_NEEDS_RESET 0x10
172
173 enum typhoon_cards {
174 TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
175 TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
176 TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
177 TYPHOON_FXM,
178 };
179
180 /* directly indexed by enum typhoon_cards, above */
181 static const struct typhoon_card_info typhoon_card_info[] __devinitdata = {
182 { "3Com Typhoon (3C990-TX)",
183 TYPHOON_CRYPTO_NONE},
184 { "3Com Typhoon (3CR990-TX-95)",
185 TYPHOON_CRYPTO_DES},
186 { "3Com Typhoon (3CR990-TX-97)",
187 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
188 { "3Com Typhoon (3C990SVR)",
189 TYPHOON_CRYPTO_NONE},
190 { "3Com Typhoon (3CR990SVR95)",
191 TYPHOON_CRYPTO_DES},
192 { "3Com Typhoon (3CR990SVR97)",
193 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
194 { "3Com Typhoon2 (3C990B-TX-M)",
195 TYPHOON_CRYPTO_VARIABLE},
196 { "3Com Typhoon2 (3C990BSVR)",
197 TYPHOON_CRYPTO_VARIABLE},
198 { "3Com Typhoon (3CR990-FX-95)",
199 TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
200 { "3Com Typhoon (3CR990-FX-97)",
201 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
202 { "3Com Typhoon (3CR990-FX-95 Server)",
203 TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
204 { "3Com Typhoon (3CR990-FX-97 Server)",
205 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
206 { "3Com Typhoon2 (3C990B-FX-97)",
207 TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
208 };
209
210 /* Notes on the new subsystem numbering scheme:
211 * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
212 * bit 4 indicates if this card has secured firmware (we don't support it)
213 * bit 8 indicates if this is a (0) copper or (1) fiber card
214 * bits 12-16 indicate card type: (0) client and (1) server
215 */
216 static struct pci_device_id typhoon_pci_tbl[] = {
217 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
218 PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
219 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
220 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
221 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
222 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
223 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
224 PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
225 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
226 PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
227 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
228 PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
229 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
230 PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
231 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
232 PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
233 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
234 PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
235 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
236 PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
237 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
238 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
239 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
240 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
241 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
242 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
243 { 0, }
244 };
245 MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
246
247 /* Define the shared memory area
248 * Align everything the 3XP will normally be using.
249 * We'll need to move/align txHi if we start using that ring.
250 */
251 #define __3xp_aligned ____cacheline_aligned
252 struct typhoon_shared {
253 struct typhoon_interface iface;
254 struct typhoon_indexes indexes __3xp_aligned;
255 struct tx_desc txLo[TXLO_ENTRIES] __3xp_aligned;
256 struct rx_desc rxLo[RX_ENTRIES] __3xp_aligned;
257 struct rx_desc rxHi[RX_ENTRIES] __3xp_aligned;
258 struct cmd_desc cmd[COMMAND_ENTRIES] __3xp_aligned;
259 struct resp_desc resp[RESPONSE_ENTRIES] __3xp_aligned;
260 struct rx_free rxBuff[RXFREE_ENTRIES] __3xp_aligned;
261 u32 zeroWord;
262 struct tx_desc txHi[TXHI_ENTRIES];
263 } __attribute__ ((packed));
264
265 struct rxbuff_ent {
266 struct sk_buff *skb;
267 dma_addr_t dma_addr;
268 };
269
270 struct typhoon {
271 /* Tx cache line section */
272 struct transmit_ring txLoRing ____cacheline_aligned;
273 struct pci_dev * tx_pdev;
274 void __iomem *tx_ioaddr;
275 u32 txlo_dma_addr;
276
277 /* Irq/Rx cache line section */
278 void __iomem *ioaddr ____cacheline_aligned;
279 struct typhoon_indexes *indexes;
280 u8 awaiting_resp;
281 u8 duplex;
282 u8 speed;
283 u8 card_state;
284 struct basic_ring rxLoRing;
285 struct pci_dev * pdev;
286 struct net_device * dev;
287 spinlock_t state_lock;
288 struct vlan_group * vlgrp;
289 struct basic_ring rxHiRing;
290 struct basic_ring rxBuffRing;
291 struct rxbuff_ent rxbuffers[RXENT_ENTRIES];
292
293 /* general section */
294 spinlock_t command_lock ____cacheline_aligned;
295 struct basic_ring cmdRing;
296 struct basic_ring respRing;
297 struct net_device_stats stats;
298 struct net_device_stats stats_saved;
299 const char * name;
300 struct typhoon_shared * shared;
301 dma_addr_t shared_dma;
302 u16 xcvr_select;
303 u16 wol_events;
304 u32 offload;
305
306 /* unused stuff (future use) */
307 int capabilities;
308 struct transmit_ring txHiRing;
309 };
310
311 enum completion_wait_values {
312 NoWait = 0, WaitNoSleep, WaitSleep,
313 };
314
315 /* These are the values for the typhoon.card_state variable.
316 * These determine where the statistics will come from in get_stats().
317 * The sleep image does not support the statistics we need.
318 */
319 enum state_values {
320 Sleeping = 0, Running,
321 };
322
323 /* PCI writes are not guaranteed to be posted in order, but outstanding writes
324 * cannot pass a read, so this forces current writes to post.
325 */
326 #define typhoon_post_pci_writes(x) \
327 do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
328
329 /* We'll wait up to six seconds for a reset, and half a second normally.
330 */
331 #define TYPHOON_UDELAY 50
332 #define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ)
333 #define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY)
334 #define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY)
335
336 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 28)
337 #define typhoon_synchronize_irq(x) synchronize_irq()
338 #else
339 #define typhoon_synchronize_irq(x) synchronize_irq(x)
340 #endif
341
342 #if defined(NETIF_F_TSO)
343 #define skb_tso_size(x) (skb_shinfo(x)->gso_size)
344 #define TSO_NUM_DESCRIPTORS 2
345 #define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT
346 #else
347 #define NETIF_F_TSO 0
348 #define skb_tso_size(x) 0
349 #define TSO_NUM_DESCRIPTORS 0
350 #define TSO_OFFLOAD_ON 0
351 #endif
352
353 static inline void
354 typhoon_inc_index(u32 *index, const int count, const int num_entries)
355 {
356 /* Increment a ring index -- we can use this for all rings execept
357 * the Rx rings, as they use different size descriptors
358 * otherwise, everything is the same size as a cmd_desc
359 */
360 *index += count * sizeof(struct cmd_desc);
361 *index %= num_entries * sizeof(struct cmd_desc);
362 }
363
364 static inline void
365 typhoon_inc_cmd_index(u32 *index, const int count)
366 {
367 typhoon_inc_index(index, count, COMMAND_ENTRIES);
368 }
369
370 static inline void
371 typhoon_inc_resp_index(u32 *index, const int count)
372 {
373 typhoon_inc_index(index, count, RESPONSE_ENTRIES);
374 }
375
376 static inline void
377 typhoon_inc_rxfree_index(u32 *index, const int count)
378 {
379 typhoon_inc_index(index, count, RXFREE_ENTRIES);
380 }
381
382 static inline void
383 typhoon_inc_tx_index(u32 *index, const int count)
384 {
385 /* if we start using the Hi Tx ring, this needs updateing */
386 typhoon_inc_index(index, count, TXLO_ENTRIES);
387 }
388
389 static inline void
390 typhoon_inc_rx_index(u32 *index, const int count)
391 {
392 /* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
393 *index += count * sizeof(struct rx_desc);
394 *index %= RX_ENTRIES * sizeof(struct rx_desc);
395 }
396
397 static int
398 typhoon_reset(void __iomem *ioaddr, int wait_type)
399 {
400 int i, err = 0;
401 int timeout;
402
403 if(wait_type == WaitNoSleep)
404 timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
405 else
406 timeout = TYPHOON_RESET_TIMEOUT_SLEEP;
407
408 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
409 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
410
411 iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
412 typhoon_post_pci_writes(ioaddr);
413 udelay(1);
414 iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);
415
416 if(wait_type != NoWait) {
417 for(i = 0; i < timeout; i++) {
418 if(ioread32(ioaddr + TYPHOON_REG_STATUS) ==
419 TYPHOON_STATUS_WAITING_FOR_HOST)
420 goto out;
421
422 if(wait_type == WaitSleep)
423 schedule_timeout_uninterruptible(1);
424 else
425 udelay(TYPHOON_UDELAY);
426 }
427
428 err = -ETIMEDOUT;
429 }
430
431 out:
432 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
433 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
434
435 /* The 3XP seems to need a little extra time to complete the load
436 * of the sleep image before we can reliably boot it. Failure to
437 * do this occasionally results in a hung adapter after boot in
438 * typhoon_init_one() while trying to read the MAC address or
439 * putting the card to sleep. 3Com's driver waits 5ms, but
440 * that seems to be overkill. However, if we can sleep, we might
441 * as well give it that much time. Otherwise, we'll give it 500us,
442 * which should be enough (I've see it work well at 100us, but still
443 * saw occasional problems.)
444 */
445 if(wait_type == WaitSleep)
446 msleep(5);
447 else
448 udelay(500);
449 return err;
450 }
451
452 static int
453 typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
454 {
455 int i, err = 0;
456
457 for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
458 if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
459 goto out;
460 udelay(TYPHOON_UDELAY);
461 }
462
463 err = -ETIMEDOUT;
464
465 out:
466 return err;
467 }
468
469 static inline void
470 typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
471 {
472 if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
473 netif_carrier_off(dev);
474 else
475 netif_carrier_on(dev);
476 }
477
478 static inline void
479 typhoon_hello(struct typhoon *tp)
480 {
481 struct basic_ring *ring = &tp->cmdRing;
482 struct cmd_desc *cmd;
483
484 /* We only get a hello request if we've not sent anything to the
485 * card in a long while. If the lock is held, then we're in the
486 * process of issuing a command, so we don't need to respond.
487 */
488 if(spin_trylock(&tp->command_lock)) {
489 cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
490 typhoon_inc_cmd_index(&ring->lastWrite, 1);
491
492 INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
493 smp_wmb();
494 iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
495 spin_unlock(&tp->command_lock);
496 }
497 }
498
499 static int
500 typhoon_process_response(struct typhoon *tp, int resp_size,
501 struct resp_desc *resp_save)
502 {
503 struct typhoon_indexes *indexes = tp->indexes;
504 struct resp_desc *resp;
505 u8 *base = tp->respRing.ringBase;
506 int count, len, wrap_len;
507 u32 cleared;
508 u32 ready;
509
510 cleared = le32_to_cpu(indexes->respCleared);
511 ready = le32_to_cpu(indexes->respReady);
512 while(cleared != ready) {
513 resp = (struct resp_desc *)(base + cleared);
514 count = resp->numDesc + 1;
515 if(resp_save && resp->seqNo) {
516 if(count > resp_size) {
517 resp_save->flags = TYPHOON_RESP_ERROR;
518 goto cleanup;
519 }
520
521 wrap_len = 0;
522 len = count * sizeof(*resp);
523 if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
524 wrap_len = cleared + len - RESPONSE_RING_SIZE;
525 len = RESPONSE_RING_SIZE - cleared;
526 }
527
528 memcpy(resp_save, resp, len);
529 if(unlikely(wrap_len)) {
530 resp_save += len / sizeof(*resp);
531 memcpy(resp_save, base, wrap_len);
532 }
533
534 resp_save = NULL;
535 } else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
536 typhoon_media_status(tp->dev, resp);
537 } else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
538 typhoon_hello(tp);
539 } else {
540 printk(KERN_ERR "%s: dumping unexpected response "
541 "0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
542 tp->name, le16_to_cpu(resp->cmd),
543 resp->numDesc, resp->flags,
544 le16_to_cpu(resp->parm1),
545 le32_to_cpu(resp->parm2),
546 le32_to_cpu(resp->parm3));
547 }
548
549 cleanup:
550 typhoon_inc_resp_index(&cleared, count);
551 }
552
553 indexes->respCleared = cpu_to_le32(cleared);
554 wmb();
555 return (resp_save == NULL);
556 }
557
558 static inline int
559 typhoon_num_free(int lastWrite, int lastRead, int ringSize)
560 {
561 /* this works for all descriptors but rx_desc, as they are a
562 * different size than the cmd_desc -- everyone else is the same
563 */
564 lastWrite /= sizeof(struct cmd_desc);
565 lastRead /= sizeof(struct cmd_desc);
566 return (ringSize + lastRead - lastWrite - 1) % ringSize;
567 }
568
569 static inline int
570 typhoon_num_free_cmd(struct typhoon *tp)
571 {
572 int lastWrite = tp->cmdRing.lastWrite;
573 int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);
574
575 return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
576 }
577
578 static inline int
579 typhoon_num_free_resp(struct typhoon *tp)
580 {
581 int respReady = le32_to_cpu(tp->indexes->respReady);
582 int respCleared = le32_to_cpu(tp->indexes->respCleared);
583
584 return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
585 }
586
587 static inline int
588 typhoon_num_free_tx(struct transmit_ring *ring)
589 {
590 /* if we start using the Hi Tx ring, this needs updating */
591 return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
592 }
593
594 static int
595 typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
596 int num_resp, struct resp_desc *resp)
597 {
598 struct typhoon_indexes *indexes = tp->indexes;
599 struct basic_ring *ring = &tp->cmdRing;
600 struct resp_desc local_resp;
601 int i, err = 0;
602 int got_resp;
603 int freeCmd, freeResp;
604 int len, wrap_len;
605
606 spin_lock(&tp->command_lock);
607
608 freeCmd = typhoon_num_free_cmd(tp);
609 freeResp = typhoon_num_free_resp(tp);
610
611 if(freeCmd < num_cmd || freeResp < num_resp) {
612 printk("%s: no descs for cmd, had (needed) %d (%d) cmd, "
613 "%d (%d) resp\n", tp->name, freeCmd, num_cmd,
614 freeResp, num_resp);
615 err = -ENOMEM;
616 goto out;
617 }
618
619 if(cmd->flags & TYPHOON_CMD_RESPOND) {
620 /* If we're expecting a response, but the caller hasn't given
621 * us a place to put it, we'll provide one.
622 */
623 tp->awaiting_resp = 1;
624 if(resp == NULL) {
625 resp = &local_resp;
626 num_resp = 1;
627 }
628 }
629
630 wrap_len = 0;
631 len = num_cmd * sizeof(*cmd);
632 if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
633 wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
634 len = COMMAND_RING_SIZE - ring->lastWrite;
635 }
636
637 memcpy(ring->ringBase + ring->lastWrite, cmd, len);
638 if(unlikely(wrap_len)) {
639 struct cmd_desc *wrap_ptr = cmd;
640 wrap_ptr += len / sizeof(*cmd);
641 memcpy(ring->ringBase, wrap_ptr, wrap_len);
642 }
643
644 typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);
645
646 /* "I feel a presence... another warrior is on the the mesa."
647 */
648 wmb();
649 iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
650 typhoon_post_pci_writes(tp->ioaddr);
651
652 if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
653 goto out;
654
655 /* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
656 * preempt or do anything other than take interrupts. So, don't
657 * wait for a response unless you have to.
658 *
659 * I've thought about trying to sleep here, but we're called
660 * from many contexts that don't allow that. Also, given the way
661 * 3Com has implemented irq coalescing, we would likely timeout --
662 * this has been observed in real life!
663 *
664 * The big killer is we have to wait to get stats from the card,
665 * though we could go to a periodic refresh of those if we don't
666 * mind them getting somewhat stale. The rest of the waiting
667 * commands occur during open/close/suspend/resume, so they aren't
668 * time critical. Creating SAs in the future will also have to
669 * wait here.
670 */
671 got_resp = 0;
672 for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
673 if(indexes->respCleared != indexes->respReady)
674 got_resp = typhoon_process_response(tp, num_resp,
675 resp);
676 udelay(TYPHOON_UDELAY);
677 }
678
679 if(!got_resp) {
680 err = -ETIMEDOUT;
681 goto out;
682 }
683
684 /* Collect the error response even if we don't care about the
685 * rest of the response
686 */
687 if(resp->flags & TYPHOON_RESP_ERROR)
688 err = -EIO;
689
690 out:
691 if(tp->awaiting_resp) {
692 tp->awaiting_resp = 0;
693 smp_wmb();
694
695 /* Ugh. If a response was added to the ring between
696 * the call to typhoon_process_response() and the clearing
697 * of tp->awaiting_resp, we could have missed the interrupt
698 * and it could hang in the ring an indeterminate amount of
699 * time. So, check for it, and interrupt ourselves if this
700 * is the case.
701 */
702 if(indexes->respCleared != indexes->respReady)
703 iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
704 }
705
706 spin_unlock(&tp->command_lock);
707 return err;
708 }
709
710 static void
711 typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
712 {
713 struct typhoon *tp = netdev_priv(dev);
714 struct cmd_desc xp_cmd;
715 int err;
716
717 spin_lock_bh(&tp->state_lock);
718 if(!tp->vlgrp != !grp) {
719 /* We've either been turned on for the first time, or we've
720 * been turned off. Update the 3XP.
721 */
722 if(grp)
723 tp->offload |= TYPHOON_OFFLOAD_VLAN;
724 else
725 tp->offload &= ~TYPHOON_OFFLOAD_VLAN;
726
727 /* If the interface is up, the runtime is running -- and we
728 * must be up for the vlan core to call us.
729 *
730 * Do the command outside of the spin lock, as it is slow.
731 */
732 INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
733 TYPHOON_CMD_SET_OFFLOAD_TASKS);
734 xp_cmd.parm2 = tp->offload;
735 xp_cmd.parm3 = tp->offload;
736 spin_unlock_bh(&tp->state_lock);
737 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
738 if(err < 0)
739 printk("%s: vlan offload error %d\n", tp->name, -err);
740 spin_lock_bh(&tp->state_lock);
741 }
742
743 /* now make the change visible */
744 tp->vlgrp = grp;
745 spin_unlock_bh(&tp->state_lock);
746 }
747
748 static void
749 typhoon_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
750 {
751 struct typhoon *tp = netdev_priv(dev);
752 spin_lock_bh(&tp->state_lock);
753 if(tp->vlgrp)
754 tp->vlgrp->vlan_devices[vid] = NULL;
755 spin_unlock_bh(&tp->state_lock);
756 }
757
758 static inline void
759 typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
760 u32 ring_dma)
761 {
762 struct tcpopt_desc *tcpd;
763 u32 tcpd_offset = ring_dma;
764
765 tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
766 tcpd_offset += txRing->lastWrite;
767 tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
768 typhoon_inc_tx_index(&txRing->lastWrite, 1);
769
770 tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
771 tcpd->numDesc = 1;
772 tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
773 tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
774 tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
775 tcpd->bytesTx = cpu_to_le32(skb->len);
776 tcpd->status = 0;
777 }
778
779 static int
780 typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
781 {
782 struct typhoon *tp = netdev_priv(dev);
783 struct transmit_ring *txRing;
784 struct tx_desc *txd, *first_txd;
785 dma_addr_t skb_dma;
786 int numDesc;
787
788 /* we have two rings to choose from, but we only use txLo for now
789 * If we start using the Hi ring as well, we'll need to update
790 * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
791 * and TXHI_ENTRIES to match, as well as update the TSO code below
792 * to get the right DMA address
793 */
794 txRing = &tp->txLoRing;
795
796 /* We need one descriptor for each fragment of the sk_buff, plus the
797 * one for the ->data area of it.
798 *
799 * The docs say a maximum of 16 fragment descriptors per TCP option
800 * descriptor, then make a new packet descriptor and option descriptor
801 * for the next 16 fragments. The engineers say just an option
802 * descriptor is needed. I've tested up to 26 fragments with a single
803 * packet descriptor/option descriptor combo, so I use that for now.
804 *
805 * If problems develop with TSO, check this first.
806 */
807 numDesc = skb_shinfo(skb)->nr_frags + 1;
808 if (skb_is_gso(skb))
809 numDesc++;
810
811 /* When checking for free space in the ring, we need to also
812 * account for the initial Tx descriptor, and we always must leave
813 * at least one descriptor unused in the ring so that it doesn't
814 * wrap and look empty.
815 *
816 * The only time we should loop here is when we hit the race
817 * between marking the queue awake and updating the cleared index.
818 * Just loop and it will appear. This comes from the acenic driver.
819 */
820 while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
821 smp_rmb();
822
823 first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
824 typhoon_inc_tx_index(&txRing->lastWrite, 1);
825
826 first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
827 first_txd->numDesc = 0;
828 first_txd->len = 0;
829 first_txd->addr = (u64)((unsigned long) skb) & 0xffffffff;
830 first_txd->addrHi = (u64)((unsigned long) skb) >> 32;
831 first_txd->processFlags = 0;
832
833 if(skb->ip_summed == CHECKSUM_PARTIAL) {
834 /* The 3XP will figure out if this is UDP/TCP */
835 first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
836 first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
837 first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
838 }
839
840 if(vlan_tx_tag_present(skb)) {
841 first_txd->processFlags |=
842 TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
843 first_txd->processFlags |=
844 cpu_to_le32(htons(vlan_tx_tag_get(skb)) <<
845 TYPHOON_TX_PF_VLAN_TAG_SHIFT);
846 }
847
848 if (skb_is_gso(skb)) {
849 first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
850 first_txd->numDesc++;
851
852 typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
853 }
854
855 txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
856 typhoon_inc_tx_index(&txRing->lastWrite, 1);
857
858 /* No need to worry about padding packet -- the firmware pads
859 * it with zeros to ETH_ZLEN for us.
860 */
861 if(skb_shinfo(skb)->nr_frags == 0) {
862 skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
863 PCI_DMA_TODEVICE);
864 txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
865 txd->len = cpu_to_le16(skb->len);
866 txd->addr = cpu_to_le32(skb_dma);
867 txd->addrHi = 0;
868 first_txd->numDesc++;
869 } else {
870 int i, len;
871
872 len = skb_headlen(skb);
873 skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
874 PCI_DMA_TODEVICE);
875 txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
876 txd->len = cpu_to_le16(len);
877 txd->addr = cpu_to_le32(skb_dma);
878 txd->addrHi = 0;
879 first_txd->numDesc++;
880
881 for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
882 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
883 void *frag_addr;
884
885 txd = (struct tx_desc *) (txRing->ringBase +
886 txRing->lastWrite);
887 typhoon_inc_tx_index(&txRing->lastWrite, 1);
888
889 len = frag->size;
890 frag_addr = (void *) page_address(frag->page) +
891 frag->page_offset;
892 skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
893 PCI_DMA_TODEVICE);
894 txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
895 txd->len = cpu_to_le16(len);
896 txd->addr = cpu_to_le32(skb_dma);
897 txd->addrHi = 0;
898 first_txd->numDesc++;
899 }
900 }
901
902 /* Kick the 3XP
903 */
904 wmb();
905 iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);
906
907 dev->trans_start = jiffies;
908
909 /* If we don't have room to put the worst case packet on the
910 * queue, then we must stop the queue. We need 2 extra
911 * descriptors -- one to prevent ring wrap, and one for the
912 * Tx header.
913 */
914 numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;
915
916 if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
917 netif_stop_queue(dev);
918
919 /* A Tx complete IRQ could have gotten inbetween, making
920 * the ring free again. Only need to recheck here, since
921 * Tx is serialized.
922 */
923 if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
924 netif_wake_queue(dev);
925 }
926
927 return 0;
928 }
929
930 static void
931 typhoon_set_rx_mode(struct net_device *dev)
932 {
933 struct typhoon *tp = netdev_priv(dev);
934 struct cmd_desc xp_cmd;
935 u32 mc_filter[2];
936 u16 filter;
937
938 filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
939 if(dev->flags & IFF_PROMISC) {
940 filter |= TYPHOON_RX_FILTER_PROMISCOUS;
941 } else if((dev->mc_count > multicast_filter_limit) ||
942 (dev->flags & IFF_ALLMULTI)) {
943 /* Too many to match, or accept all multicasts. */
944 filter |= TYPHOON_RX_FILTER_ALL_MCAST;
945 } else if(dev->mc_count) {
946 struct dev_mc_list *mclist;
947 int i;
948
949 memset(mc_filter, 0, sizeof(mc_filter));
950 for(i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
951 i++, mclist = mclist->next) {
952 int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
953 mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
954 }
955
956 INIT_COMMAND_NO_RESPONSE(&xp_cmd,
957 TYPHOON_CMD_SET_MULTICAST_HASH);
958 xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
959 xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
960 xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
961 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
962
963 filter |= TYPHOON_RX_FILTER_MCAST_HASH;
964 }
965
966 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
967 xp_cmd.parm1 = filter;
968 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
969 }
970
971 static int
972 typhoon_do_get_stats(struct typhoon *tp)
973 {
974 struct net_device_stats *stats = &tp->stats;
975 struct net_device_stats *saved = &tp->stats_saved;
976 struct cmd_desc xp_cmd;
977 struct resp_desc xp_resp[7];
978 struct stats_resp *s = (struct stats_resp *) xp_resp;
979 int err;
980
981 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
982 err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
983 if(err < 0)
984 return err;
985
986 /* 3Com's Linux driver uses txMultipleCollisions as it's
987 * collisions value, but there is some other collision info as well...
988 *
989 * The extra status reported would be a good candidate for
990 * ethtool_ops->get_{strings,stats}()
991 */
992 stats->tx_packets = le32_to_cpu(s->txPackets);
993 stats->tx_bytes = le32_to_cpu(s->txBytes);
994 stats->tx_errors = le32_to_cpu(s->txCarrierLost);
995 stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost);
996 stats->collisions = le32_to_cpu(s->txMultipleCollisions);
997 stats->rx_packets = le32_to_cpu(s->rxPacketsGood);
998 stats->rx_bytes = le32_to_cpu(s->rxBytesGood);
999 stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns);
1000 stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
1001 le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors);
1002 stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors);
1003 stats->rx_length_errors = le32_to_cpu(s->rxOversized);
1004 tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
1005 SPEED_100 : SPEED_10;
1006 tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
1007 DUPLEX_FULL : DUPLEX_HALF;
1008
1009 /* add in the saved statistics
1010 */
1011 stats->tx_packets += saved->tx_packets;
1012 stats->tx_bytes += saved->tx_bytes;
1013 stats->tx_errors += saved->tx_errors;
1014 stats->collisions += saved->collisions;
1015 stats->rx_packets += saved->rx_packets;
1016 stats->rx_bytes += saved->rx_bytes;
1017 stats->rx_fifo_errors += saved->rx_fifo_errors;
1018 stats->rx_errors += saved->rx_errors;
1019 stats->rx_crc_errors += saved->rx_crc_errors;
1020 stats->rx_length_errors += saved->rx_length_errors;
1021
1022 return 0;
1023 }
1024
1025 static struct net_device_stats *
1026 typhoon_get_stats(struct net_device *dev)
1027 {
1028 struct typhoon *tp = netdev_priv(dev);
1029 struct net_device_stats *stats = &tp->stats;
1030 struct net_device_stats *saved = &tp->stats_saved;
1031
1032 smp_rmb();
1033 if(tp->card_state == Sleeping)
1034 return saved;
1035
1036 if(typhoon_do_get_stats(tp) < 0) {
1037 printk(KERN_ERR "%s: error getting stats\n", dev->name);
1038 return saved;
1039 }
1040
1041 return stats;
1042 }
1043
1044 static int
1045 typhoon_set_mac_address(struct net_device *dev, void *addr)
1046 {
1047 struct sockaddr *saddr = (struct sockaddr *) addr;
1048
1049 if(netif_running(dev))
1050 return -EBUSY;
1051
1052 memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
1053 return 0;
1054 }
1055
1056 static void
1057 typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1058 {
1059 struct typhoon *tp = netdev_priv(dev);
1060 struct pci_dev *pci_dev = tp->pdev;
1061 struct cmd_desc xp_cmd;
1062 struct resp_desc xp_resp[3];
1063
1064 smp_rmb();
1065 if(tp->card_state == Sleeping) {
1066 strcpy(info->fw_version, "Sleep image");
1067 } else {
1068 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
1069 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
1070 strcpy(info->fw_version, "Unknown runtime");
1071 } else {
1072 u32 sleep_ver = xp_resp[0].parm2;
1073 snprintf(info->fw_version, 32, "%02x.%03x.%03x",
1074 sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
1075 sleep_ver & 0xfff);
1076 }
1077 }
1078
1079 strcpy(info->driver, DRV_MODULE_NAME);
1080 strcpy(info->version, DRV_MODULE_VERSION);
1081 strcpy(info->bus_info, pci_name(pci_dev));
1082 }
1083
1084 static int
1085 typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1086 {
1087 struct typhoon *tp = netdev_priv(dev);
1088
1089 cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
1090 SUPPORTED_Autoneg;
1091
1092 switch (tp->xcvr_select) {
1093 case TYPHOON_XCVR_10HALF:
1094 cmd->advertising = ADVERTISED_10baseT_Half;
1095 break;
1096 case TYPHOON_XCVR_10FULL:
1097 cmd->advertising = ADVERTISED_10baseT_Full;
1098 break;
1099 case TYPHOON_XCVR_100HALF:
1100 cmd->advertising = ADVERTISED_100baseT_Half;
1101 break;
1102 case TYPHOON_XCVR_100FULL:
1103 cmd->advertising = ADVERTISED_100baseT_Full;
1104 break;
1105 case TYPHOON_XCVR_AUTONEG:
1106 cmd->advertising = ADVERTISED_10baseT_Half |
1107 ADVERTISED_10baseT_Full |
1108 ADVERTISED_100baseT_Half |
1109 ADVERTISED_100baseT_Full |
1110 ADVERTISED_Autoneg;
1111 break;
1112 }
1113
1114 if(tp->capabilities & TYPHOON_FIBER) {
1115 cmd->supported |= SUPPORTED_FIBRE;
1116 cmd->advertising |= ADVERTISED_FIBRE;
1117 cmd->port = PORT_FIBRE;
1118 } else {
1119 cmd->supported |= SUPPORTED_10baseT_Half |
1120 SUPPORTED_10baseT_Full |
1121 SUPPORTED_TP;
1122 cmd->advertising |= ADVERTISED_TP;
1123 cmd->port = PORT_TP;
1124 }
1125
1126 /* need to get stats to make these link speed/duplex valid */
1127 typhoon_do_get_stats(tp);
1128 cmd->speed = tp->speed;
1129 cmd->duplex = tp->duplex;
1130 cmd->phy_address = 0;
1131 cmd->transceiver = XCVR_INTERNAL;
1132 if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
1133 cmd->autoneg = AUTONEG_ENABLE;
1134 else
1135 cmd->autoneg = AUTONEG_DISABLE;
1136 cmd->maxtxpkt = 1;
1137 cmd->maxrxpkt = 1;
1138
1139 return 0;
1140 }
1141
1142 static int
1143 typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1144 {
1145 struct typhoon *tp = netdev_priv(dev);
1146 struct cmd_desc xp_cmd;
1147 int xcvr;
1148 int err;
1149
1150 err = -EINVAL;
1151 if(cmd->autoneg == AUTONEG_ENABLE) {
1152 xcvr = TYPHOON_XCVR_AUTONEG;
1153 } else {
1154 if(cmd->duplex == DUPLEX_HALF) {
1155 if(cmd->speed == SPEED_10)
1156 xcvr = TYPHOON_XCVR_10HALF;
1157 else if(cmd->speed == SPEED_100)
1158 xcvr = TYPHOON_XCVR_100HALF;
1159 else
1160 goto out;
1161 } else if(cmd->duplex == DUPLEX_FULL) {
1162 if(cmd->speed == SPEED_10)
1163 xcvr = TYPHOON_XCVR_10FULL;
1164 else if(cmd->speed == SPEED_100)
1165 xcvr = TYPHOON_XCVR_100FULL;
1166 else
1167 goto out;
1168 } else
1169 goto out;
1170 }
1171
1172 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1173 xp_cmd.parm1 = cpu_to_le16(xcvr);
1174 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1175 if(err < 0)
1176 goto out;
1177
1178 tp->xcvr_select = xcvr;
1179 if(cmd->autoneg == AUTONEG_ENABLE) {
1180 tp->speed = 0xff; /* invalid */
1181 tp->duplex = 0xff; /* invalid */
1182 } else {
1183 tp->speed = cmd->speed;
1184 tp->duplex = cmd->duplex;
1185 }
1186
1187 out:
1188 return err;
1189 }
1190
1191 static void
1192 typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1193 {
1194 struct typhoon *tp = netdev_priv(dev);
1195
1196 wol->supported = WAKE_PHY | WAKE_MAGIC;
1197 wol->wolopts = 0;
1198 if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
1199 wol->wolopts |= WAKE_PHY;
1200 if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
1201 wol->wolopts |= WAKE_MAGIC;
1202 memset(&wol->sopass, 0, sizeof(wol->sopass));
1203 }
1204
1205 static int
1206 typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1207 {
1208 struct typhoon *tp = netdev_priv(dev);
1209
1210 if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
1211 return -EINVAL;
1212
1213 tp->wol_events = 0;
1214 if(wol->wolopts & WAKE_PHY)
1215 tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
1216 if(wol->wolopts & WAKE_MAGIC)
1217 tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
1218
1219 return 0;
1220 }
1221
1222 static u32
1223 typhoon_get_rx_csum(struct net_device *dev)
1224 {
1225 /* For now, we don't allow turning off RX checksums.
1226 */
1227 return 1;
1228 }
1229
1230 static void
1231 typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
1232 {
1233 ering->rx_max_pending = RXENT_ENTRIES;
1234 ering->rx_mini_max_pending = 0;
1235 ering->rx_jumbo_max_pending = 0;
1236 ering->tx_max_pending = TXLO_ENTRIES - 1;
1237
1238 ering->rx_pending = RXENT_ENTRIES;
1239 ering->rx_mini_pending = 0;
1240 ering->rx_jumbo_pending = 0;
1241 ering->tx_pending = TXLO_ENTRIES - 1;
1242 }
1243
1244 static const struct ethtool_ops typhoon_ethtool_ops = {
1245 .get_settings = typhoon_get_settings,
1246 .set_settings = typhoon_set_settings,
1247 .get_drvinfo = typhoon_get_drvinfo,
1248 .get_wol = typhoon_get_wol,
1249 .set_wol = typhoon_set_wol,
1250 .get_link = ethtool_op_get_link,
1251 .get_rx_csum = typhoon_get_rx_csum,
1252 .get_tx_csum = ethtool_op_get_tx_csum,
1253 .set_tx_csum = ethtool_op_set_tx_csum,
1254 .get_sg = ethtool_op_get_sg,
1255 .set_sg = ethtool_op_set_sg,
1256 .get_tso = ethtool_op_get_tso,
1257 .set_tso = ethtool_op_set_tso,
1258 .get_ringparam = typhoon_get_ringparam,
1259 };
1260
1261 static int
1262 typhoon_wait_interrupt(void __iomem *ioaddr)
1263 {
1264 int i, err = 0;
1265
1266 for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
1267 if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
1268 TYPHOON_INTR_BOOTCMD)
1269 goto out;
1270 udelay(TYPHOON_UDELAY);
1271 }
1272
1273 err = -ETIMEDOUT;
1274
1275 out:
1276 iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1277 return err;
1278 }
1279
1280 #define shared_offset(x) offsetof(struct typhoon_shared, x)
1281
1282 static void
1283 typhoon_init_interface(struct typhoon *tp)
1284 {
1285 struct typhoon_interface *iface = &tp->shared->iface;
1286 dma_addr_t shared_dma;
1287
1288 memset(tp->shared, 0, sizeof(struct typhoon_shared));
1289
1290 /* The *Hi members of iface are all init'd to zero by the memset().
1291 */
1292 shared_dma = tp->shared_dma + shared_offset(indexes);
1293 iface->ringIndex = cpu_to_le32(shared_dma);
1294
1295 shared_dma = tp->shared_dma + shared_offset(txLo);
1296 iface->txLoAddr = cpu_to_le32(shared_dma);
1297 iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));
1298
1299 shared_dma = tp->shared_dma + shared_offset(txHi);
1300 iface->txHiAddr = cpu_to_le32(shared_dma);
1301 iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));
1302
1303 shared_dma = tp->shared_dma + shared_offset(rxBuff);
1304 iface->rxBuffAddr = cpu_to_le32(shared_dma);
1305 iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
1306 sizeof(struct rx_free));
1307
1308 shared_dma = tp->shared_dma + shared_offset(rxLo);
1309 iface->rxLoAddr = cpu_to_le32(shared_dma);
1310 iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));
1311
1312 shared_dma = tp->shared_dma + shared_offset(rxHi);
1313 iface->rxHiAddr = cpu_to_le32(shared_dma);
1314 iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));
1315
1316 shared_dma = tp->shared_dma + shared_offset(cmd);
1317 iface->cmdAddr = cpu_to_le32(shared_dma);
1318 iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);
1319
1320 shared_dma = tp->shared_dma + shared_offset(resp);
1321 iface->respAddr = cpu_to_le32(shared_dma);
1322 iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);
1323
1324 shared_dma = tp->shared_dma + shared_offset(zeroWord);
1325 iface->zeroAddr = cpu_to_le32(shared_dma);
1326
1327 tp->indexes = &tp->shared->indexes;
1328 tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
1329 tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
1330 tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
1331 tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
1332 tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
1333 tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
1334 tp->respRing.ringBase = (u8 *) tp->shared->resp;
1335
1336 tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
1337 tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;
1338
1339 tp->txlo_dma_addr = iface->txLoAddr;
1340 tp->card_state = Sleeping;
1341 smp_wmb();
1342
1343 tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
1344 tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;
1345
1346 spin_lock_init(&tp->command_lock);
1347 spin_lock_init(&tp->state_lock);
1348 }
1349
1350 static void
1351 typhoon_init_rings(struct typhoon *tp)
1352 {
1353 memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
1354
1355 tp->txLoRing.lastWrite = 0;
1356 tp->txHiRing.lastWrite = 0;
1357 tp->rxLoRing.lastWrite = 0;
1358 tp->rxHiRing.lastWrite = 0;
1359 tp->rxBuffRing.lastWrite = 0;
1360 tp->cmdRing.lastWrite = 0;
1361 tp->cmdRing.lastWrite = 0;
1362
1363 tp->txLoRing.lastRead = 0;
1364 tp->txHiRing.lastRead = 0;
1365 }
1366
1367 static int
1368 typhoon_download_firmware(struct typhoon *tp)
1369 {
1370 void __iomem *ioaddr = tp->ioaddr;
1371 struct pci_dev *pdev = tp->pdev;
1372 struct typhoon_file_header *fHdr;
1373 struct typhoon_section_header *sHdr;
1374 u8 *image_data;
1375 void *dpage;
1376 dma_addr_t dpage_dma;
1377 unsigned int csum;
1378 u32 irqEnabled;
1379 u32 irqMasked;
1380 u32 numSections;
1381 u32 section_len;
1382 u32 len;
1383 u32 load_addr;
1384 u32 hmac;
1385 int i;
1386 int err;
1387
1388 err = -EINVAL;
1389 fHdr = (struct typhoon_file_header *) typhoon_firmware_image;
1390 image_data = (u8 *) fHdr;
1391
1392 if(memcmp(fHdr->tag, "TYPHOON", 8)) {
1393 printk(KERN_ERR "%s: Invalid firmware image!\n", tp->name);
1394 goto err_out;
1395 }
1396
1397 /* Cannot just map the firmware image using pci_map_single() as
1398 * the firmware is part of the kernel/module image, so we allocate
1399 * some consistent memory to copy the sections into, as it is simpler,
1400 * and short-lived. If we ever split out and require a userland
1401 * firmware loader, then we can revisit this.
1402 */
1403 err = -ENOMEM;
1404 dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
1405 if(!dpage) {
1406 printk(KERN_ERR "%s: no DMA mem for firmware\n", tp->name);
1407 goto err_out;
1408 }
1409
1410 irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE);
1411 iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD,
1412 ioaddr + TYPHOON_REG_INTR_ENABLE);
1413 irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK);
1414 iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD,
1415 ioaddr + TYPHOON_REG_INTR_MASK);
1416
1417 err = -ETIMEDOUT;
1418 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
1419 printk(KERN_ERR "%s: card ready timeout\n", tp->name);
1420 goto err_out_irq;
1421 }
1422
1423 numSections = le32_to_cpu(fHdr->numSections);
1424 load_addr = le32_to_cpu(fHdr->startAddr);
1425
1426 iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1427 iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
1428 hmac = le32_to_cpu(fHdr->hmacDigest[0]);
1429 iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
1430 hmac = le32_to_cpu(fHdr->hmacDigest[1]);
1431 iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
1432 hmac = le32_to_cpu(fHdr->hmacDigest[2]);
1433 iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
1434 hmac = le32_to_cpu(fHdr->hmacDigest[3]);
1435 iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
1436 hmac = le32_to_cpu(fHdr->hmacDigest[4]);
1437 iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
1438 typhoon_post_pci_writes(ioaddr);
1439 iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);
1440
1441 image_data += sizeof(struct typhoon_file_header);
1442
1443 /* The ioread32() in typhoon_wait_interrupt() will force the
1444 * last write to the command register to post, so
1445 * we don't need a typhoon_post_pci_writes() after it.
1446 */
1447 for(i = 0; i < numSections; i++) {
1448 sHdr = (struct typhoon_section_header *) image_data;
1449 image_data += sizeof(struct typhoon_section_header);
1450 load_addr = le32_to_cpu(sHdr->startAddr);
1451 section_len = le32_to_cpu(sHdr->len);
1452
1453 while(section_len) {
1454 len = min_t(u32, section_len, PAGE_SIZE);
1455
1456 if(typhoon_wait_interrupt(ioaddr) < 0 ||
1457 ioread32(ioaddr + TYPHOON_REG_STATUS) !=
1458 TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
1459 printk(KERN_ERR "%s: segment ready timeout\n",
1460 tp->name);
1461 goto err_out_irq;
1462 }
1463
1464 /* Do an pseudo IPv4 checksum on the data -- first
1465 * need to convert each u16 to cpu order before
1466 * summing. Fortunately, due to the properties of
1467 * the checksum, we can do this once, at the end.
1468 */
1469 csum = csum_partial_copy_nocheck(image_data, dpage,
1470 len, 0);
1471 csum = csum_fold(csum);
1472 csum = le16_to_cpu(csum);
1473
1474 iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
1475 iowrite32(csum, ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
1476 iowrite32(load_addr,
1477 ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
1478 iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
1479 iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
1480 typhoon_post_pci_writes(ioaddr);
1481 iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
1482 ioaddr + TYPHOON_REG_COMMAND);
1483
1484 image_data += len;
1485 load_addr += len;
1486 section_len -= len;
1487 }
1488 }
1489
1490 if(typhoon_wait_interrupt(ioaddr) < 0 ||
1491 ioread32(ioaddr + TYPHOON_REG_STATUS) !=
1492 TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
1493 printk(KERN_ERR "%s: final segment ready timeout\n", tp->name);
1494 goto err_out_irq;
1495 }
1496
1497 iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);
1498
1499 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
1500 printk(KERN_ERR "%s: boot ready timeout, status 0x%0x\n",
1501 tp->name, ioread32(ioaddr + TYPHOON_REG_STATUS));
1502 goto err_out_irq;
1503 }
1504
1505 err = 0;
1506
1507 err_out_irq:
1508 iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
1509 iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);
1510
1511 pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);
1512
1513 err_out:
1514 return err;
1515 }
1516
1517 static int
1518 typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
1519 {
1520 void __iomem *ioaddr = tp->ioaddr;
1521
1522 if(typhoon_wait_status(ioaddr, initial_status) < 0) {
1523 printk(KERN_ERR "%s: boot ready timeout\n", tp->name);
1524 goto out_timeout;
1525 }
1526
1527 iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
1528 iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
1529 typhoon_post_pci_writes(ioaddr);
1530 iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD,
1531 ioaddr + TYPHOON_REG_COMMAND);
1532
1533 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
1534 printk(KERN_ERR "%s: boot finish timeout (status 0x%x)\n",
1535 tp->name, ioread32(ioaddr + TYPHOON_REG_STATUS));
1536 goto out_timeout;
1537 }
1538
1539 /* Clear the Transmit and Command ready registers
1540 */
1541 iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY);
1542 iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY);
1543 iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY);
1544 typhoon_post_pci_writes(ioaddr);
1545 iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);
1546
1547 return 0;
1548
1549 out_timeout:
1550 return -ETIMEDOUT;
1551 }
1552
1553 static u32
1554 typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
1555 volatile u32 * index)
1556 {
1557 u32 lastRead = txRing->lastRead;
1558 struct tx_desc *tx;
1559 dma_addr_t skb_dma;
1560 int dma_len;
1561 int type;
1562
1563 while(lastRead != le32_to_cpu(*index)) {
1564 tx = (struct tx_desc *) (txRing->ringBase + lastRead);
1565 type = tx->flags & TYPHOON_TYPE_MASK;
1566
1567 if(type == TYPHOON_TX_DESC) {
1568 /* This tx_desc describes a packet.
1569 */
1570 unsigned long ptr = tx->addr | ((u64)tx->addrHi << 32);
1571 struct sk_buff *skb = (struct sk_buff *) ptr;
1572 dev_kfree_skb_irq(skb);
1573 } else if(type == TYPHOON_FRAG_DESC) {
1574 /* This tx_desc describes a memory mapping. Free it.
1575 */
1576 skb_dma = (dma_addr_t) le32_to_cpu(tx->addr);
1577 dma_len = le16_to_cpu(tx->len);
1578 pci_unmap_single(tp->pdev, skb_dma, dma_len,
1579 PCI_DMA_TODEVICE);
1580 }
1581
1582 tx->flags = 0;
1583 typhoon_inc_tx_index(&lastRead, 1);
1584 }
1585
1586 return lastRead;
1587 }
1588
1589 static void
1590 typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
1591 volatile u32 * index)
1592 {
1593 u32 lastRead;
1594 int numDesc = MAX_SKB_FRAGS + 1;
1595
1596 /* This will need changing if we start to use the Hi Tx ring. */
1597 lastRead = typhoon_clean_tx(tp, txRing, index);
1598 if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
1599 lastRead, TXLO_ENTRIES) > (numDesc + 2))
1600 netif_wake_queue(tp->dev);
1601
1602 txRing->lastRead = lastRead;
1603 smp_wmb();
1604 }
1605
1606 static void
1607 typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
1608 {
1609 struct typhoon_indexes *indexes = tp->indexes;
1610 struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
1611 struct basic_ring *ring = &tp->rxBuffRing;
1612 struct rx_free *r;
1613
1614 if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
1615 indexes->rxBuffCleared) {
1616 /* no room in ring, just drop the skb
1617 */
1618 dev_kfree_skb_any(rxb->skb);
1619 rxb->skb = NULL;
1620 return;
1621 }
1622
1623 r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
1624 typhoon_inc_rxfree_index(&ring->lastWrite, 1);
1625 r->virtAddr = idx;
1626 r->physAddr = cpu_to_le32(rxb->dma_addr);
1627
1628 /* Tell the card about it */
1629 wmb();
1630 indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
1631 }
1632
1633 static int
1634 typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
1635 {
1636 struct typhoon_indexes *indexes = tp->indexes;
1637 struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
1638 struct basic_ring *ring = &tp->rxBuffRing;
1639 struct rx_free *r;
1640 struct sk_buff *skb;
1641 dma_addr_t dma_addr;
1642
1643 rxb->skb = NULL;
1644
1645 if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
1646 indexes->rxBuffCleared)
1647 return -ENOMEM;
1648
1649 skb = dev_alloc_skb(PKT_BUF_SZ);
1650 if(!skb)
1651 return -ENOMEM;
1652
1653 #if 0
1654 /* Please, 3com, fix the firmware to allow DMA to a unaligned
1655 * address! Pretty please?
1656 */
1657 skb_reserve(skb, 2);
1658 #endif
1659
1660 skb->dev = tp->dev;
1661 dma_addr = pci_map_single(tp->pdev, skb->data,
1662 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
1663
1664 /* Since no card does 64 bit DAC, the high bits will never
1665 * change from zero.
1666 */
1667 r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
1668 typhoon_inc_rxfree_index(&ring->lastWrite, 1);
1669 r->virtAddr = idx;
1670 r->physAddr = cpu_to_le32(dma_addr);
1671 rxb->skb = skb;
1672 rxb->dma_addr = dma_addr;
1673
1674 /* Tell the card about it */
1675 wmb();
1676 indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
1677 return 0;
1678 }
1679
1680 static int
1681 typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile u32 * ready,
1682 volatile u32 * cleared, int budget)
1683 {
1684 struct rx_desc *rx;
1685 struct sk_buff *skb, *new_skb;
1686 struct rxbuff_ent *rxb;
1687 dma_addr_t dma_addr;
1688 u32 local_ready;
1689 u32 rxaddr;
1690 int pkt_len;
1691 u32 idx;
1692 u32 csum_bits;
1693 int received;
1694
1695 received = 0;
1696 local_ready = le32_to_cpu(*ready);
1697 rxaddr = le32_to_cpu(*cleared);
1698 while(rxaddr != local_ready && budget > 0) {
1699 rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
1700 idx = rx->addr;
1701 rxb = &tp->rxbuffers[idx];
1702 skb = rxb->skb;
1703 dma_addr = rxb->dma_addr;
1704
1705 typhoon_inc_rx_index(&rxaddr, 1);
1706
1707 if(rx->flags & TYPHOON_RX_ERROR) {
1708 typhoon_recycle_rx_skb(tp, idx);
1709 continue;
1710 }
1711
1712 pkt_len = le16_to_cpu(rx->frameLen);
1713
1714 if(pkt_len < rx_copybreak &&
1715 (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1716 new_skb->dev = tp->dev;
1717 skb_reserve(new_skb, 2);
1718 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
1719 PKT_BUF_SZ,
1720 PCI_DMA_FROMDEVICE);
1721 eth_copy_and_sum(new_skb, skb->data, pkt_len, 0);
1722 pci_dma_sync_single_for_device(tp->pdev, dma_addr,
1723 PKT_BUF_SZ,
1724 PCI_DMA_FROMDEVICE);
1725 skb_put(new_skb, pkt_len);
1726 typhoon_recycle_rx_skb(tp, idx);
1727 } else {
1728 new_skb = skb;
1729 skb_put(new_skb, pkt_len);
1730 pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
1731 PCI_DMA_FROMDEVICE);
1732 typhoon_alloc_rx_skb(tp, idx);
1733 }
1734 new_skb->protocol = eth_type_trans(new_skb, tp->dev);
1735 csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
1736 TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
1737 if(csum_bits ==
1738 (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD)
1739 || csum_bits ==
1740 (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
1741 new_skb->ip_summed = CHECKSUM_UNNECESSARY;
1742 } else
1743 new_skb->ip_summed = CHECKSUM_NONE;
1744
1745 spin_lock(&tp->state_lock);
1746 if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
1747 vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
1748 ntohl(rx->vlanTag) & 0xffff);
1749 else
1750 netif_receive_skb(new_skb);
1751 spin_unlock(&tp->state_lock);
1752
1753 tp->dev->last_rx = jiffies;
1754 received++;
1755 budget--;
1756 }
1757 *cleared = cpu_to_le32(rxaddr);
1758
1759 return received;
1760 }
1761
1762 static void
1763 typhoon_fill_free_ring(struct typhoon *tp)
1764 {
1765 u32 i;
1766
1767 for(i = 0; i < RXENT_ENTRIES; i++) {
1768 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1769 if(rxb->skb)
1770 continue;
1771 if(typhoon_alloc_rx_skb(tp, i) < 0)
1772 break;
1773 }
1774 }
1775
1776 static int
1777 typhoon_poll(struct net_device *dev, int *total_budget)
1778 {
1779 struct typhoon *tp = netdev_priv(dev);
1780 struct typhoon_indexes *indexes = tp->indexes;
1781 int orig_budget = *total_budget;
1782 int budget, work_done, done;
1783
1784 rmb();
1785 if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
1786 typhoon_process_response(tp, 0, NULL);
1787
1788 if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
1789 typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);
1790
1791 if(orig_budget > dev->quota)
1792 orig_budget = dev->quota;
1793
1794 budget = orig_budget;
1795 work_done = 0;
1796 done = 1;
1797
1798 if(indexes->rxHiCleared != indexes->rxHiReady) {
1799 work_done = typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
1800 &indexes->rxHiCleared, budget);
1801 budget -= work_done;
1802 }
1803
1804 if(indexes->rxLoCleared != indexes->rxLoReady) {
1805 work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
1806 &indexes->rxLoCleared, budget);
1807 }
1808
1809 if(work_done) {
1810 *total_budget -= work_done;
1811 dev->quota -= work_done;
1812
1813 if(work_done >= orig_budget)
1814 done = 0;
1815 }
1816
1817 if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
1818 /* rxBuff ring is empty, try to fill it. */
1819 typhoon_fill_free_ring(tp);
1820 }
1821
1822 if(done) {
1823 netif_rx_complete(dev);
1824 iowrite32(TYPHOON_INTR_NONE,
1825 tp->ioaddr + TYPHOON_REG_INTR_MASK);
1826 typhoon_post_pci_writes(tp->ioaddr);
1827 }
1828
1829 return (done ? 0 : 1);
1830 }
1831
1832 static irqreturn_t
1833 typhoon_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
1834 {
1835 struct net_device *dev = (struct net_device *) dev_instance;
1836 struct typhoon *tp = dev->priv;
1837 void __iomem *ioaddr = tp->ioaddr;
1838 u32 intr_status;
1839
1840 intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
1841 if(!(intr_status & TYPHOON_INTR_HOST_INT))
1842 return IRQ_NONE;
1843
1844 iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);
1845
1846 if(netif_rx_schedule_prep(dev)) {
1847 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
1848 typhoon_post_pci_writes(ioaddr);
1849 __netif_rx_schedule(dev);
1850 } else {
1851 printk(KERN_ERR "%s: Error, poll already scheduled\n",
1852 dev->name);
1853 }
1854 return IRQ_HANDLED;
1855 }
1856
1857 static void
1858 typhoon_free_rx_rings(struct typhoon *tp)
1859 {
1860 u32 i;
1861
1862 for(i = 0; i < RXENT_ENTRIES; i++) {
1863 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1864 if(rxb->skb) {
1865 pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
1866 PCI_DMA_FROMDEVICE);
1867 dev_kfree_skb(rxb->skb);
1868 rxb->skb = NULL;
1869 }
1870 }
1871 }
1872
1873 static int
1874 typhoon_sleep(struct typhoon *tp, pci_power_t state, u16 events)
1875 {
1876 struct pci_dev *pdev = tp->pdev;
1877 void __iomem *ioaddr = tp->ioaddr;
1878 struct cmd_desc xp_cmd;
1879 int err;
1880
1881 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
1882 xp_cmd.parm1 = events;
1883 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1884 if(err < 0) {
1885 printk(KERN_ERR "%s: typhoon_sleep(): wake events cmd err %d\n",
1886 tp->name, err);
1887 return err;
1888 }
1889
1890 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
1891 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1892 if(err < 0) {
1893 printk(KERN_ERR "%s: typhoon_sleep(): sleep cmd err %d\n",
1894 tp->name, err);
1895 return err;
1896 }
1897
1898 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
1899 return -ETIMEDOUT;
1900
1901 /* Since we cannot monitor the status of the link while sleeping,
1902 * tell the world it went away.
1903 */
1904 netif_carrier_off(tp->dev);
1905
1906 pci_enable_wake(tp->pdev, state, 1);
1907 pci_disable_device(pdev);
1908 return pci_set_power_state(pdev, state);
1909 }
1910
1911 static int
1912 typhoon_wakeup(struct typhoon *tp, int wait_type)
1913 {
1914 struct pci_dev *pdev = tp->pdev;
1915 void __iomem *ioaddr = tp->ioaddr;
1916
1917 pci_set_power_state(pdev, PCI_D0);
1918 pci_restore_state(pdev);
1919
1920 /* Post 2.x.x versions of the Sleep Image require a reset before
1921 * we can download the Runtime Image. But let's not make users of
1922 * the old firmware pay for the reset.
1923 */
1924 iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
1925 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
1926 (tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
1927 return typhoon_reset(ioaddr, wait_type);
1928
1929 return 0;
1930 }
1931
1932 static int
1933 typhoon_start_runtime(struct typhoon *tp)
1934 {
1935 struct net_device *dev = tp->dev;
1936 void __iomem *ioaddr = tp->ioaddr;
1937 struct cmd_desc xp_cmd;
1938 int err;
1939
1940 typhoon_init_rings(tp);
1941 typhoon_fill_free_ring(tp);
1942
1943 err = typhoon_download_firmware(tp);
1944 if(err < 0) {
1945 printk("%s: cannot load runtime on 3XP\n", tp->name);
1946 goto error_out;
1947 }
1948
1949 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
1950 printk("%s: cannot boot 3XP\n", tp->name);
1951 err = -EIO;
1952 goto error_out;
1953 }
1954
1955 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
1956 xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
1957 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1958 if(err < 0)
1959 goto error_out;
1960
1961 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
1962 xp_cmd.parm1 = cpu_to_le16(ntohs(*(u16 *)&dev->dev_addr[0]));
1963 xp_cmd.parm2 = cpu_to_le32(ntohl(*(u32 *)&dev->dev_addr[2]));
1964 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1965 if(err < 0)
1966 goto error_out;
1967
1968 /* Disable IRQ coalescing -- we can reenable it when 3Com gives
1969 * us some more information on how to control it.
1970 */
1971 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
1972 xp_cmd.parm1 = 0;
1973 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1974 if(err < 0)
1975 goto error_out;
1976
1977 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1978 xp_cmd.parm1 = tp->xcvr_select;
1979 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1980 if(err < 0)
1981 goto error_out;
1982
1983 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
1984 xp_cmd.parm1 = __constant_cpu_to_le16(ETH_P_8021Q);
1985 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1986 if(err < 0)
1987 goto error_out;
1988
1989 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
1990 spin_lock_bh(&tp->state_lock);
1991 xp_cmd.parm2 = tp->offload;
1992 xp_cmd.parm3 = tp->offload;
1993 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1994 spin_unlock_bh(&tp->state_lock);
1995 if(err < 0)
1996 goto error_out;
1997
1998 typhoon_set_rx_mode(dev);
1999
2000 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
2001 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2002 if(err < 0)
2003 goto error_out;
2004
2005 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
2006 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2007 if(err < 0)
2008 goto error_out;
2009
2010 tp->card_state = Running;
2011 smp_wmb();
2012
2013 iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
2014 iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
2015 typhoon_post_pci_writes(ioaddr);
2016
2017 return 0;
2018
2019 error_out:
2020 typhoon_reset(ioaddr, WaitNoSleep);
2021 typhoon_free_rx_rings(tp);
2022 typhoon_init_rings(tp);
2023 return err;
2024 }
2025
2026 static int
2027 typhoon_stop_runtime(struct typhoon *tp, int wait_type)
2028 {
2029 struct typhoon_indexes *indexes = tp->indexes;
2030 struct transmit_ring *txLo = &tp->txLoRing;
2031 void __iomem *ioaddr = tp->ioaddr;
2032 struct cmd_desc xp_cmd;
2033 int i;
2034
2035 /* Disable interrupts early, since we can't schedule a poll
2036 * when called with !netif_running(). This will be posted
2037 * when we force the posting of the command.
2038 */
2039 iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
2040
2041 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
2042 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2043
2044 /* Wait 1/2 sec for any outstanding transmits to occur
2045 * We'll cleanup after the reset if this times out.
2046 */
2047 for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
2048 if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
2049 break;
2050 udelay(TYPHOON_UDELAY);
2051 }
2052
2053 if(i == TYPHOON_WAIT_TIMEOUT)
2054 printk(KERN_ERR
2055 "%s: halt timed out waiting for Tx to complete\n",
2056 tp->name);
2057
2058 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
2059 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2060
2061 /* save the statistics so when we bring the interface up again,
2062 * the values reported to userspace are correct.
2063 */
2064 tp->card_state = Sleeping;
2065 smp_wmb();
2066 typhoon_do_get_stats(tp);
2067 memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));
2068
2069 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
2070 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2071
2072 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
2073 printk(KERN_ERR "%s: timed out waiting for 3XP to halt\n",
2074 tp->name);
2075
2076 if(typhoon_reset(ioaddr, wait_type) < 0) {
2077 printk(KERN_ERR "%s: unable to reset 3XP\n", tp->name);
2078 return -ETIMEDOUT;
2079 }
2080
2081 /* cleanup any outstanding Tx packets */
2082 if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
2083 indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
2084 typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
2085 }
2086
2087 return 0;
2088 }
2089
2090 static void
2091 typhoon_tx_timeout(struct net_device *dev)
2092 {
2093 struct typhoon *tp = netdev_priv(dev);
2094
2095 if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
2096 printk(KERN_WARNING "%s: could not reset in tx timeout\n",
2097 dev->name);
2098 goto truely_dead;
2099 }
2100
2101 /* If we ever start using the Hi ring, it will need cleaning too */
2102 typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
2103 typhoon_free_rx_rings(tp);
2104
2105 if(typhoon_start_runtime(tp) < 0) {
2106 printk(KERN_ERR "%s: could not start runtime in tx timeout\n",
2107 dev->name);
2108 goto truely_dead;
2109 }
2110
2111 netif_wake_queue(dev);
2112 return;
2113
2114 truely_dead:
2115 /* Reset the hardware, and turn off carrier to avoid more timeouts */
2116 typhoon_reset(tp->ioaddr, NoWait);
2117 netif_carrier_off(dev);
2118 }
2119
2120 static int
2121 typhoon_open(struct net_device *dev)
2122 {
2123 struct typhoon *tp = netdev_priv(dev);
2124 int err;
2125
2126 err = typhoon_wakeup(tp, WaitSleep);
2127 if(err < 0) {
2128 printk(KERN_ERR "%s: unable to wakeup device\n", dev->name);
2129 goto out_sleep;
2130 }
2131
2132 err = request_irq(dev->irq, &typhoon_interrupt, IRQF_SHARED,
2133 dev->name, dev);
2134 if(err < 0)
2135 goto out_sleep;
2136
2137 err = typhoon_start_runtime(tp);
2138 if(err < 0)
2139 goto out_irq;
2140
2141 netif_start_queue(dev);
2142 return 0;
2143
2144 out_irq:
2145 free_irq(dev->irq, dev);
2146
2147 out_sleep:
2148 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2149 printk(KERN_ERR "%s: unable to reboot into sleep img\n",
2150 dev->name);
2151 typhoon_reset(tp->ioaddr, NoWait);
2152 goto out;
2153 }
2154
2155 if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
2156 printk(KERN_ERR "%s: unable to go back to sleep\n", dev->name);
2157
2158 out:
2159 return err;
2160 }
2161
2162 static int
2163 typhoon_close(struct net_device *dev)
2164 {
2165 struct typhoon *tp = netdev_priv(dev);
2166
2167 netif_stop_queue(dev);
2168
2169 if(typhoon_stop_runtime(tp, WaitSleep) < 0)
2170 printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);
2171
2172 /* Make sure there is no irq handler running on a different CPU. */
2173 typhoon_synchronize_irq(dev->irq);
2174 free_irq(dev->irq, dev);
2175
2176 typhoon_free_rx_rings(tp);
2177 typhoon_init_rings(tp);
2178
2179 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
2180 printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);
2181
2182 if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
2183 printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);
2184
2185 return 0;
2186 }
2187
2188 #ifdef CONFIG_PM
2189 static int
2190 typhoon_resume(struct pci_dev *pdev)
2191 {
2192 struct net_device *dev = pci_get_drvdata(pdev);
2193 struct typhoon *tp = netdev_priv(dev);
2194
2195 /* If we're down, resume when we are upped.
2196 */
2197 if(!netif_running(dev))
2198 return 0;
2199
2200 if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
2201 printk(KERN_ERR "%s: critical: could not wake up in resume\n",
2202 dev->name);
2203 goto reset;
2204 }
2205
2206 if(typhoon_start_runtime(tp) < 0) {
2207 printk(KERN_ERR "%s: critical: could not start runtime in "
2208 "resume\n", dev->name);
2209 goto reset;
2210 }
2211
2212 netif_device_attach(dev);
2213 netif_start_queue(dev);
2214 return 0;
2215
2216 reset:
2217 typhoon_reset(tp->ioaddr, NoWait);
2218 return -EBUSY;
2219 }
2220
2221 static int
2222 typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
2223 {
2224 struct net_device *dev = pci_get_drvdata(pdev);
2225 struct typhoon *tp = netdev_priv(dev);
2226 struct cmd_desc xp_cmd;
2227
2228 /* If we're down, we're already suspended.
2229 */
2230 if(!netif_running(dev))
2231 return 0;
2232
2233 spin_lock_bh(&tp->state_lock);
2234 if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
2235 spin_unlock_bh(&tp->state_lock);
2236 printk(KERN_ERR "%s: cannot do WAKE_MAGIC with VLANS\n",
2237 dev->name);
2238 return -EBUSY;
2239 }
2240 spin_unlock_bh(&tp->state_lock);
2241
2242 netif_device_detach(dev);
2243
2244 if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
2245 printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);
2246 goto need_resume;
2247 }
2248
2249 typhoon_free_rx_rings(tp);
2250 typhoon_init_rings(tp);
2251
2252 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2253 printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);
2254 goto need_resume;
2255 }
2256
2257 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
2258 xp_cmd.parm1 = cpu_to_le16(ntohs(*(u16 *)&dev->dev_addr[0]));
2259 xp_cmd.parm2 = cpu_to_le32(ntohl(*(u32 *)&dev->dev_addr[2]));
2260 if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
2261 printk(KERN_ERR "%s: unable to set mac address in suspend\n",
2262 dev->name);
2263 goto need_resume;
2264 }
2265
2266 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
2267 xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
2268 if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
2269 printk(KERN_ERR "%s: unable to set rx filter in suspend\n",
2270 dev->name);
2271 goto need_resume;
2272 }
2273
2274 if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
2275 printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);
2276 goto need_resume;
2277 }
2278
2279 return 0;
2280
2281 need_resume:
2282 typhoon_resume(pdev);
2283 return -EBUSY;
2284 }
2285
2286 static int
2287 typhoon_enable_wake(struct pci_dev *pdev, pci_power_t state, int enable)
2288 {
2289 return pci_enable_wake(pdev, state, enable);
2290 }
2291 #endif
2292
2293 static int __devinit
2294 typhoon_test_mmio(struct pci_dev *pdev)
2295 {
2296 void __iomem *ioaddr = pci_iomap(pdev, 1, 128);
2297 int mode = 0;
2298 u32 val;
2299
2300 if(!ioaddr)
2301 goto out;
2302
2303 if(ioread32(ioaddr + TYPHOON_REG_STATUS) !=
2304 TYPHOON_STATUS_WAITING_FOR_HOST)
2305 goto out_unmap;
2306
2307 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
2308 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
2309 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
2310
2311 /* Ok, see if we can change our interrupt status register by
2312 * sending ourselves an interrupt. If so, then MMIO works.
2313 * The 50usec delay is arbitrary -- it could probably be smaller.
2314 */
2315 val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
2316 if((val & TYPHOON_INTR_SELF) == 0) {
2317 iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT);
2318 ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
2319 udelay(50);
2320 val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
2321 if(val & TYPHOON_INTR_SELF)
2322 mode = 1;
2323 }
2324
2325 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
2326 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
2327 iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
2328 ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
2329
2330 out_unmap:
2331 pci_iounmap(pdev, ioaddr);
2332
2333 out:
2334 if(!mode)
2335 printk(KERN_INFO PFX "falling back to port IO\n");
2336 return mode;
2337 }
2338
2339 static int __devinit
2340 typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2341 {
2342 static int did_version = 0;
2343 struct net_device *dev;
2344 struct typhoon *tp;
2345 int card_id = (int) ent->driver_data;
2346 void __iomem *ioaddr;
2347 void *shared;
2348 dma_addr_t shared_dma;
2349 struct cmd_desc xp_cmd;
2350 struct resp_desc xp_resp[3];
2351 int i;
2352 int err = 0;
2353
2354 if(!did_version++)
2355 printk(KERN_INFO "%s", version);
2356
2357 dev = alloc_etherdev(sizeof(*tp));
2358 if(dev == NULL) {
2359 printk(ERR_PFX "%s: unable to alloc new net device\n",
2360 pci_name(pdev));
2361 err = -ENOMEM;
2362 goto error_out;
2363 }
2364 SET_MODULE_OWNER(dev);
2365 SET_NETDEV_DEV(dev, &pdev->dev);
2366
2367 err = pci_enable_device(pdev);
2368 if(err < 0) {
2369 printk(ERR_PFX "%s: unable to enable device\n",
2370 pci_name(pdev));
2371 goto error_out_dev;
2372 }
2373
2374 err = pci_set_mwi(pdev);
2375 if(err < 0) {
2376 printk(ERR_PFX "%s: unable to set MWI\n", pci_name(pdev));
2377 goto error_out_disable;
2378 }
2379
2380 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2381 if(err < 0) {
2382 printk(ERR_PFX "%s: No usable DMA configuration\n",
2383 pci_name(pdev));
2384 goto error_out_mwi;
2385 }
2386
2387 /* sanity checks on IO and MMIO BARs
2388 */
2389 if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2390 printk(ERR_PFX
2391 "%s: region #1 not a PCI IO resource, aborting\n",
2392 pci_name(pdev));
2393 err = -ENODEV;
2394 goto error_out_mwi;
2395 }
2396 if(pci_resource_len(pdev, 0) < 128) {
2397 printk(ERR_PFX "%s: Invalid PCI IO region size, aborting\n",
2398 pci_name(pdev));
2399 err = -ENODEV;
2400 goto error_out_mwi;
2401 }
2402 if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
2403 printk(ERR_PFX
2404 "%s: region #1 not a PCI MMIO resource, aborting\n",
2405 pci_name(pdev));
2406 err = -ENODEV;
2407 goto error_out_mwi;
2408 }
2409 if(pci_resource_len(pdev, 1) < 128) {
2410 printk(ERR_PFX "%s: Invalid PCI MMIO region size, aborting\n",
2411 pci_name(pdev));
2412 err = -ENODEV;
2413 goto error_out_mwi;
2414 }
2415
2416 err = pci_request_regions(pdev, "typhoon");
2417 if(err < 0) {
2418 printk(ERR_PFX "%s: could not request regions\n",
2419 pci_name(pdev));
2420 goto error_out_mwi;
2421 }
2422
2423 /* map our registers
2424 */
2425 if(use_mmio != 0 && use_mmio != 1)
2426 use_mmio = typhoon_test_mmio(pdev);
2427
2428 ioaddr = pci_iomap(pdev, use_mmio, 128);
2429 if (!ioaddr) {
2430 printk(ERR_PFX "%s: cannot remap registers, aborting\n",
2431 pci_name(pdev));
2432 err = -EIO;
2433 goto error_out_regions;
2434 }
2435
2436 /* allocate pci dma space for rx and tx descriptor rings
2437 */
2438 shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
2439 &shared_dma);
2440 if(!shared) {
2441 printk(ERR_PFX "%s: could not allocate DMA memory\n",
2442 pci_name(pdev));
2443 err = -ENOMEM;
2444 goto error_out_remap;
2445 }
2446
2447 dev->irq = pdev->irq;
2448 tp = netdev_priv(dev);
2449 tp->shared = (struct typhoon_shared *) shared;
2450 tp->shared_dma = shared_dma;
2451 tp->pdev = pdev;
2452 tp->tx_pdev = pdev;
2453 tp->ioaddr = ioaddr;
2454 tp->tx_ioaddr = ioaddr;
2455 tp->dev = dev;
2456
2457 /* Init sequence:
2458 * 1) Reset the adapter to clear any bad juju
2459 * 2) Reload the sleep image
2460 * 3) Boot the sleep image
2461 * 4) Get the hardware address.
2462 * 5) Put the card to sleep.
2463 */
2464 if (typhoon_reset(ioaddr, WaitSleep) < 0) {
2465 printk(ERR_PFX "%s: could not reset 3XP\n", pci_name(pdev));
2466 err = -EIO;
2467 goto error_out_dma;
2468 }
2469
2470 /* Now that we've reset the 3XP and are sure it's not going to
2471 * write all over memory, enable bus mastering, and save our
2472 * state for resuming after a suspend.
2473 */
2474 pci_set_master(pdev);
2475 pci_save_state(pdev);
2476
2477 /* dev->name is not valid until we register, but we need to
2478 * use some common routines to initialize the card. So that those
2479 * routines print the right name, we keep our oun pointer to the name
2480 */
2481 tp->name = pci_name(pdev);
2482
2483 typhoon_init_interface(tp);
2484 typhoon_init_rings(tp);
2485
2486 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2487 printk(ERR_PFX "%s: cannot boot 3XP sleep image\n",
2488 pci_name(pdev));
2489 err = -EIO;
2490 goto error_out_reset;
2491 }
2492
2493 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
2494 if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
2495 printk(ERR_PFX "%s: cannot read MAC address\n",
2496 pci_name(pdev));
2497 err = -EIO;
2498 goto error_out_reset;
2499 }
2500
2501 *(u16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
2502 *(u32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
2503
2504 if(!is_valid_ether_addr(dev->dev_addr)) {
2505 printk(ERR_PFX "%s: Could not obtain valid ethernet address, "
2506 "aborting\n", pci_name(pdev));
2507 goto error_out_reset;
2508 }
2509
2510 /* Read the Sleep Image version last, so the response is valid
2511 * later when we print out the version reported.
2512 */
2513 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
2514 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
2515 printk(ERR_PFX "%s: Could not get Sleep Image version\n",
2516 pci_name(pdev));
2517 goto error_out_reset;
2518 }
2519
2520 tp->capabilities = typhoon_card_info[card_id].capabilities;
2521 tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
2522
2523 /* Typhoon 1.0 Sleep Images return one response descriptor to the
2524 * READ_VERSIONS command. Those versions are OK after waking up
2525 * from sleep without needing a reset. Typhoon 1.1+ Sleep Images
2526 * seem to need a little extra help to get started. Since we don't
2527 * know how to nudge it along, just kick it.
2528 */
2529 if(xp_resp[0].numDesc != 0)
2530 tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
2531
2532 if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
2533 printk(ERR_PFX "%s: cannot put adapter to sleep\n",
2534 pci_name(pdev));
2535 err = -EIO;
2536 goto error_out_reset;
2537 }
2538
2539 /* The chip-specific entries in the device structure. */
2540 dev->open = typhoon_open;
2541 dev->hard_start_xmit = typhoon_start_tx;
2542 dev->stop = typhoon_close;
2543 dev->set_multicast_list = typhoon_set_rx_mode;
2544 dev->tx_timeout = typhoon_tx_timeout;
2545 dev->poll = typhoon_poll;
2546 dev->weight = 16;
2547 dev->watchdog_timeo = TX_TIMEOUT;
2548 dev->get_stats = typhoon_get_stats;
2549 dev->set_mac_address = typhoon_set_mac_address;
2550 dev->vlan_rx_register = typhoon_vlan_rx_register;
2551 dev->vlan_rx_kill_vid = typhoon_vlan_rx_kill_vid;
2552 SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops);
2553
2554 /* We can handle scatter gather, up to 16 entries, and
2555 * we can do IP checksumming (only version 4, doh...)
2556 */
2557 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2558 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2559 dev->features |= NETIF_F_TSO;
2560
2561 if(register_netdev(dev) < 0)
2562 goto error_out_reset;
2563
2564 /* fixup our local name */
2565 tp->name = dev->name;
2566
2567 pci_set_drvdata(pdev, dev);
2568
2569 printk(KERN_INFO "%s: %s at %s 0x%llx, ",
2570 dev->name, typhoon_card_info[card_id].name,
2571 use_mmio ? "MMIO" : "IO",
2572 (unsigned long long)pci_resource_start(pdev, use_mmio));
2573 for(i = 0; i < 5; i++)
2574 printk("%2.2x:", dev->dev_addr[i]);
2575 printk("%2.2x\n", dev->dev_addr[i]);
2576
2577 /* xp_resp still contains the response to the READ_VERSIONS command.
2578 * For debugging, let the user know what version he has.
2579 */
2580 if(xp_resp[0].numDesc == 0) {
2581 /* This is the Typhoon 1.0 type Sleep Image, last 16 bits
2582 * of version is Month/Day of build.
2583 */
2584 u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
2585 printk(KERN_INFO "%s: Typhoon 1.0 Sleep Image built "
2586 "%02u/%02u/2000\n", dev->name, monthday >> 8,
2587 monthday & 0xff);
2588 } else if(xp_resp[0].numDesc == 2) {
2589 /* This is the Typhoon 1.1+ type Sleep Image
2590 */
2591 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
2592 u8 *ver_string = (u8 *) &xp_resp[1];
2593 ver_string[25] = 0;
2594 printk(KERN_INFO "%s: Typhoon 1.1+ Sleep Image version "
2595 "%02x.%03x.%03x %s\n", dev->name, sleep_ver >> 24,
2596 (sleep_ver >> 12) & 0xfff, sleep_ver & 0xfff,
2597 ver_string);
2598 } else {
2599 printk(KERN_WARNING "%s: Unknown Sleep Image version "
2600 "(%u:%04x)\n", dev->name, xp_resp[0].numDesc,
2601 le32_to_cpu(xp_resp[0].parm2));
2602 }
2603
2604 return 0;
2605
2606 error_out_reset:
2607 typhoon_reset(ioaddr, NoWait);
2608
2609 error_out_dma:
2610 pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2611 shared, shared_dma);
2612 error_out_remap:
2613 pci_iounmap(pdev, ioaddr);
2614 error_out_regions:
2615 pci_release_regions(pdev);
2616 error_out_mwi:
2617 pci_clear_mwi(pdev);
2618 error_out_disable:
2619 pci_disable_device(pdev);
2620 error_out_dev:
2621 free_netdev(dev);
2622 error_out:
2623 return err;
2624 }
2625
2626 static void __devexit
2627 typhoon_remove_one(struct pci_dev *pdev)
2628 {
2629 struct net_device *dev = pci_get_drvdata(pdev);
2630 struct typhoon *tp = netdev_priv(dev);
2631
2632 unregister_netdev(dev);
2633 pci_set_power_state(pdev, PCI_D0);
2634 pci_restore_state(pdev);
2635 typhoon_reset(tp->ioaddr, NoWait);
2636 pci_iounmap(pdev, tp->ioaddr);
2637 pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2638 tp->shared, tp->shared_dma);
2639 pci_release_regions(pdev);
2640 pci_clear_mwi(pdev);
2641 pci_disable_device(pdev);
2642 pci_set_drvdata(pdev, NULL);
2643 free_netdev(dev);
2644 }
2645
2646 static struct pci_driver typhoon_driver = {
2647 .name = DRV_MODULE_NAME,
2648 .id_table = typhoon_pci_tbl,
2649 .probe = typhoon_init_one,
2650 .remove = __devexit_p(typhoon_remove_one),
2651 #ifdef CONFIG_PM
2652 .suspend = typhoon_suspend,
2653 .resume = typhoon_resume,
2654 .enable_wake = typhoon_enable_wake,
2655 #endif
2656 };
2657
2658 static int __init
2659 typhoon_init(void)
2660 {
2661 return pci_register_driver(&typhoon_driver);
2662 }
2663
2664 static void __exit
2665 typhoon_cleanup(void)
2666 {
2667 pci_unregister_driver(&typhoon_driver);
2668 }
2669
2670 module_init(typhoon_init);
2671 module_exit(typhoon_cleanup);