tg3: Code cleanup.
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / net / tg3.c
/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2007 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#define TG3_TSO_SUPPORT	1

#include "tg3.h"

#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.92"
#define DRV_MODULE_RELDATE	"May 2, 2008"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ	(9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp)	((tp)->tx_pending / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

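/* Write a chip register through the PCI configuration-space window
 * (TG3PCI_REG_BASE_ADDR/TG3PCI_REG_DATA), serialized by indirect_lock.
 * Used when direct MMIO writes are not safe on this chip.
 */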
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

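/* Write a register and immediately read it back, forcing the posted
 * PCI write to complete before returning.
 */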
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

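/* Indirect mailbox write.  The receive-return and standard-ring
 * producer mailboxes have dedicated config-space aliases; everything
 * else is reached at offset + 0x5600 through the register window.
 */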
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

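/* TX mailbox write.  Chips with the TXD mailbox hardware bug need the
 * value written twice, and chips that may reorder mailbox writes need
 * a read-back to flush the write.
 */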
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)	tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)	tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)	tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)	tp->read32_mbox(tp, reg)

#define tw32(reg,val)		tp->write32(tp, reg, val)
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)		tp->read32(tp, reg)

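/* Read/write NIC SRAM through the memory window, either via PCI
 * config space (SRAM_USE_CONFIG) or via MMIO.  The 5906 statistics
 * block region is not accessible this way and is skipped.
 */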
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
				APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	int off;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	off = 4 * locknum;
	tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}

static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

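/* Re-enable interrupts: unmask PCI INTA, acknowledge the last status
 * tag in the interrupt mailbox (twice for 1-shot MSI), and force an
 * interrupt if work is already pending.
 */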
static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}

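/* Returns nonzero if the status block indicates pending work: a link
 * change event (unless the link is polled) or new RX/TX completions.
 */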
static inline unsigned int tg3_has_work(struct tg3 *tp)
{
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tp->tx_cons ||
	    sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	napi_disable(&tp->napi);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	napi_enable(&tp->napi);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}

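/* Select the core clock source via TG3PCI_CLOCK_CTRL, preserving the
 * CLKRUN bits; 5705+ parts use the 62.5 MHz core clock, older parts
 * step through ALTCLK to the 44 MHz core clock.  CPMU and 5780-class
 * chips manage their clocks themselves.
 */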
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

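/* Read a PHY register through the MAC's MII communication interface.
 * Autopolling is paused for the transaction, and MI_COM_BUSY is
 * polled (up to PHY_BUSY_LOOPS times) for completion.
 */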
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

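/* Write a PHY register through the MII communication interface, using
 * the same busy-poll scheme as tg3_readphy().  Writes to MII_TG3_CTRL
 * and MII_TG3_AUX_CTRL are silently skipped on the 5906.
 */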
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	/* limit only goes negative when the loop above times out;
	 * a successful break on the last iteration leaves it at zero.
	 */
	if (limit < 0)
		return -EBUSY;

	return 0;
}

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;

	/* Wait for up to 2.5 seconds (250000 * 10 usec) */
	for (i = 0; i < 250000; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(10);
	}
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		if (netif_msg_link(tp))
			printk(KERN_INFO PFX "%s: Link is down.\n",
			       tp->dev->name);
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       tp->dev->name,
		       (tp->link_config.active_speed == SPEED_1000 ?
			1000 :
			(tp->link_config.active_speed == SPEED_100 ?
			 100 : 10)),
		       (tp->link_config.active_duplex == DUPLEX_FULL ?
			"full" : "half"));

		printk(KERN_INFO PFX
		       "%s: Flow control is %s for TX and %s for RX.\n",
		       tp->dev->name,
		       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
		       "on" : "off",
		       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
		       "on" : "off");
		tg3_ump_link_report(tp);
	}
}

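/* Translate the driver's TX/RX flow control setting into the 802.3
 * PAUSE/ASYM_PAUSE advertisement bits for a 1000baseT link.
 */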
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & TG3_FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & TG3_FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & TG3_FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & TG3_FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_PAUSE_CAP) {
		if (lcladv & ADVERTISE_PAUSE_ASYM) {
			if (rmtadv & LPA_PAUSE_CAP)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
			else if (rmtadv & LPA_PAUSE_ASYM)
				cap = TG3_FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_PAUSE_CAP)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_PAUSE_ASYM) {
		if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
			cap = TG3_FLOW_CTRL_TX;
	}

	return cap;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = TG3_FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = TG3_FLOW_CTRL_TX;
	}

	return cap;
}

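/* Resolve the flow control configuration and program the MAC RX/TX
 * modes.  With pause autonegotiation enabled, the result comes from
 * the local and remote advertisements (1000X for serdes, 1000T for
 * copper); otherwise the configured flowctrl value is used verbatim.
 */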
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & TG3_FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & TG3_FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
			tg3_writephy(tp, MII_TG3_EPHY_TEST,
				     ephy | MII_TG3_EPHY_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
				if (enable)
					phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
				tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
			}
			tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
		}
	} else {
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}

static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Enable SM_DSP clock and tx 6dB coding. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn off SM_DSP clock. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, 0x16, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	/* limit only goes negative when the loop above times out. */
	if (limit < 0)
		return -EBUSY;

	return 0;
}

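/* Write test patterns into the PHY DSP channels and read them back to
 * verify, as part of the 5703/4/5 PHY reset workaround.  On mismatch
 * or macro timeout, another PHY reset is requested via *resetp.
 */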
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB. */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	} else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}

/* Reset the tigon3 PHY and reapply the chip-specific PHY
 * workarounds.  Reports loss of link via tg3_link_report().
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 cpmuctrl;
	u32 phy_status;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		u32 phy;

		phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
		u32 val;

		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}

		/* Disable GPHY autopowerdown. */
		tg3_writephy(tp, MII_TG3_MISC_SHDW,
			     MII_TG3_MISC_SHDW_WREN |
			     MII_TG3_MISC_SHDW_APD_SEL |
			     MII_TG3_MISC_SHDW_APD_WKTM_84MS);
	}

	tg3_phy_apply_otp(tp);

out:
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	} else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames.
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}

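/* Configure the GPIOs that switch the auxiliary (Vaux) power source.
 * On dual-port devices (5704/5714) the peer's WOL/ASF state is taken
 * into account, since the setting is shared between both ports.
 */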
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				   NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					  GRC_LCLCTRL_GPIO_OE1 |
					  GRC_LCLCTRL_GPIO_OE2 |
					  GRC_LCLCTRL_GPIO_OUTPUT1 |
					  GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}

static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}

static int tg3_setup_phy(struct tg3 *, int);

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);
static int tg3_nvram_lock(struct tg3 *);
static void tg3_nvram_unlock(struct tg3 *);

static void tg3_power_down_phy(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;

	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}

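/* Move the device into the requested PCI power state.  For the low
 * power states this sets up WOL, slows or gates the core clocks,
 * powers down the PHY when WOL/ASF/APE no longer need it, and finally
 * writes the PM control register.
 */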
1633 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1634 {
1635 u32 misc_host_ctrl;
1636 u16 power_control, power_caps;
1637 int pm = tp->pm_cap;
1638
1639 /* Make sure register accesses (indirect or otherwise)
1640 * will function correctly.
1641 */
1642 pci_write_config_dword(tp->pdev,
1643 TG3PCI_MISC_HOST_CTRL,
1644 tp->misc_host_ctrl);
1645
1646 pci_read_config_word(tp->pdev,
1647 pm + PCI_PM_CTRL,
1648 &power_control);
1649 power_control |= PCI_PM_CTRL_PME_STATUS;
1650 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1651 switch (state) {
1652 case PCI_D0:
1653 power_control |= 0;
1654 pci_write_config_word(tp->pdev,
1655 pm + PCI_PM_CTRL,
1656 power_control);
1657 udelay(100); /* Delay after power state change */
1658
1659 /* Switch out of Vaux if it is a NIC */
1660 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
1661 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1662
1663 return 0;
1664
1665 case PCI_D1:
1666 power_control |= 1;
1667 break;
1668
1669 case PCI_D2:
1670 power_control |= 2;
1671 break;
1672
1673 case PCI_D3hot:
1674 power_control |= 3;
1675 break;
1676
1677 default:
1678 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1679 "requested.\n",
1680 tp->dev->name, state);
1681 return -EINVAL;
1682 };
1683
1684 power_control |= PCI_PM_CTRL_PME_ENABLE;
1685
1686 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1687 tw32(TG3PCI_MISC_HOST_CTRL,
1688 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1689
1690 if (tp->link_config.phy_is_low_power == 0) {
1691 tp->link_config.phy_is_low_power = 1;
1692 tp->link_config.orig_speed = tp->link_config.speed;
1693 tp->link_config.orig_duplex = tp->link_config.duplex;
1694 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1695 }
1696
1697 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1698 tp->link_config.speed = SPEED_10;
1699 tp->link_config.duplex = DUPLEX_HALF;
1700 tp->link_config.autoneg = AUTONEG_ENABLE;
1701 tg3_setup_phy(tp, 0);
1702 }
1703
1704 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1705 u32 val;
1706
1707 val = tr32(GRC_VCPU_EXT_CTRL);
1708 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
1709 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1710 int i;
1711 u32 val;
1712
1713 for (i = 0; i < 200; i++) {
1714 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1715 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1716 break;
1717 msleep(1);
1718 }
1719 }
1720 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
1721 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1722 WOL_DRV_STATE_SHUTDOWN |
1723 WOL_DRV_WOL |
1724 WOL_SET_MAGIC_PKT);
1725
1726 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1727
1728 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1729 u32 mac_mode;
1730
1731 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1732 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1733 udelay(40);
1734
1735 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1736 mac_mode = MAC_MODE_PORT_MODE_GMII;
1737 else
1738 mac_mode = MAC_MODE_PORT_MODE_MII;
1739
1740 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
1741 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1742 ASIC_REV_5700) {
1743 u32 speed = (tp->tg3_flags &
1744 TG3_FLAG_WOL_SPEED_100MB) ?
1745 SPEED_100 : SPEED_10;
1746 if (tg3_5700_link_polarity(tp, speed))
1747 mac_mode |= MAC_MODE_LINK_POLARITY;
1748 else
1749 mac_mode &= ~MAC_MODE_LINK_POLARITY;
1750 }
1751 } else {
1752 mac_mode = MAC_MODE_PORT_MODE_TBI;
1753 }
1754
1755 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1756 tw32(MAC_LED_CTRL, tp->led_ctrl);
1757
1758 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1759 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1760 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1761
1762 tw32_f(MAC_MODE, mac_mode);
1763 udelay(100);
1764
1765 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1766 udelay(10);
1767 }
1768
1769 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1770 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1771 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1772 u32 base_val;
1773
1774 base_val = tp->pci_clock_ctrl;
1775 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1776 CLOCK_CTRL_TXCLK_DISABLE);
1777
1778 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1779 CLOCK_CTRL_PWRDOWN_PLL133, 40);
1780 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1781 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
1782 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
1783 /* do nothing */
1784 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1785 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1786 u32 newbits1, newbits2;
1787
1788 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1789 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1790 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1791 CLOCK_CTRL_TXCLK_DISABLE |
1792 CLOCK_CTRL_ALTCLK);
1793 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1794 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1795 newbits1 = CLOCK_CTRL_625_CORE;
1796 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1797 } else {
1798 newbits1 = CLOCK_CTRL_ALTCLK;
1799 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1800 }
1801
1802 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1803 40);
1804
1805 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1806 40);
1807
1808 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1809 u32 newbits3;
1810
1811 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1812 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1813 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1814 CLOCK_CTRL_TXCLK_DISABLE |
1815 CLOCK_CTRL_44MHZ_CORE);
1816 } else {
1817 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1818 }
1819
1820 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1821 tp->pci_clock_ctrl | newbits3, 40);
1822 }
1823 }
1824
1825 if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1826 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
1827 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
1828 tg3_power_down_phy(tp);
1829
1830 tg3_frob_aux_power(tp);
1831
1832 /* Workaround for unstable PLL clock */
1833 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1834 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1835 u32 val = tr32(0x7d00);
1836
1837 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1838 tw32(0x7d00, val);
1839 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1840 int err;
1841
1842 err = tg3_nvram_lock(tp);
1843 tg3_halt_cpu(tp, RX_CPU_BASE);
1844 if (!err)
1845 tg3_nvram_unlock(tp);
1846 }
1847 }
1848
1849 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1850
1851 /* Finally, set the new power state. */
1852 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1853 udelay(100); /* Delay after power state change */
1854
1855 return 0;
1856 }
1857
1858 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1859 {
1860 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1861 case MII_TG3_AUX_STAT_10HALF:
1862 *speed = SPEED_10;
1863 *duplex = DUPLEX_HALF;
1864 break;
1865
1866 case MII_TG3_AUX_STAT_10FULL:
1867 *speed = SPEED_10;
1868 *duplex = DUPLEX_FULL;
1869 break;
1870
1871 case MII_TG3_AUX_STAT_100HALF:
1872 *speed = SPEED_100;
1873 *duplex = DUPLEX_HALF;
1874 break;
1875
1876 case MII_TG3_AUX_STAT_100FULL:
1877 *speed = SPEED_100;
1878 *duplex = DUPLEX_FULL;
1879 break;
1880
1881 case MII_TG3_AUX_STAT_1000HALF:
1882 *speed = SPEED_1000;
1883 *duplex = DUPLEX_HALF;
1884 break;
1885
1886 case MII_TG3_AUX_STAT_1000FULL:
1887 *speed = SPEED_1000;
1888 *duplex = DUPLEX_FULL;
1889 break;
1890
1891 default:
1892 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1893 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1894 SPEED_10;
1895 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1896 DUPLEX_HALF;
1897 break;
1898 }
1899 *speed = SPEED_INVALID;
1900 *duplex = DUPLEX_INVALID;
1901 break;
1902 }
1903 }
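/* Typical use, as in tg3_setup_copper_phy() below: read the PHY's
 * auxiliary status register and decode it into the generic
 * speed/duplex values kept in link_config.  A sketch:
 *
 *	u32 aux_stat;
 *	u16 speed;
 *	u8 duplex;
 *
 *	tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
 *	tg3_aux_stat_to_speed_duplex(tp, aux_stat, &speed, &duplex);
 *
 * Anything the decoder cannot recognize maps to SPEED_INVALID /
 * DUPLEX_INVALID, letting callers detect an unresolved link.
 */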
1904
1905 static void tg3_phy_copper_begin(struct tg3 *tp)
1906 {
1907 u32 new_adv;
1908 int i;
1909
1910 if (tp->link_config.phy_is_low_power) {
1911 /* Entering low power mode. Disable gigabit and
1912 * 100baseT advertisements.
1913 */
1914 tg3_writephy(tp, MII_TG3_CTRL, 0);
1915
1916 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1917 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1918 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1919 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1920
1921 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1922 } else if (tp->link_config.speed == SPEED_INVALID) {
1923 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1924 tp->link_config.advertising &=
1925 ~(ADVERTISED_1000baseT_Half |
1926 ADVERTISED_1000baseT_Full);
1927
1928 new_adv = ADVERTISE_CSMA;
1929 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1930 new_adv |= ADVERTISE_10HALF;
1931 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1932 new_adv |= ADVERTISE_10FULL;
1933 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1934 new_adv |= ADVERTISE_100HALF;
1935 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1936 new_adv |= ADVERTISE_100FULL;
1937
1938 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
1939
1940 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1941
1942 if (tp->link_config.advertising &
1943 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1944 new_adv = 0;
1945 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1946 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1947 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1948 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1949 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1950 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1951 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1952 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1953 MII_TG3_CTRL_ENABLE_AS_MASTER);
1954 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1955 } else {
1956 tg3_writephy(tp, MII_TG3_CTRL, 0);
1957 }
1958 } else {
1959 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
1960 new_adv |= ADVERTISE_CSMA;
1961
1962 /* Asking for a specific link mode. */
1963 if (tp->link_config.speed == SPEED_1000) {
1964 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1965
1966 if (tp->link_config.duplex == DUPLEX_FULL)
1967 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1968 else
1969 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1970 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1971 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1972 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1973 MII_TG3_CTRL_ENABLE_AS_MASTER);
1974 } else {
1975 if (tp->link_config.speed == SPEED_100) {
1976 if (tp->link_config.duplex == DUPLEX_FULL)
1977 new_adv |= ADVERTISE_100FULL;
1978 else
1979 new_adv |= ADVERTISE_100HALF;
1980 } else {
1981 if (tp->link_config.duplex == DUPLEX_FULL)
1982 new_adv |= ADVERTISE_10FULL;
1983 else
1984 new_adv |= ADVERTISE_10HALF;
1985 }
1986 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1987
1988 new_adv = 0;
1989 }
1990
1991 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1992 }
1993
1994 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1995 tp->link_config.speed != SPEED_INVALID) {
1996 u32 bmcr, orig_bmcr;
1997
1998 tp->link_config.active_speed = tp->link_config.speed;
1999 tp->link_config.active_duplex = tp->link_config.duplex;
2000
2001 bmcr = 0;
2002 switch (tp->link_config.speed) {
2003 default:
2004 case SPEED_10:
2005 break;
2006
2007 case SPEED_100:
2008 bmcr |= BMCR_SPEED100;
2009 break;
2010
2011 case SPEED_1000:
2012 bmcr |= TG3_BMCR_SPEED1000;
2013 break;
2014 }
2015
2016 if (tp->link_config.duplex == DUPLEX_FULL)
2017 bmcr |= BMCR_FULLDPLX;
2018
2019 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2020 (bmcr != orig_bmcr)) {
2021 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2022 for (i = 0; i < 1500; i++) {
2023 u32 tmp;
2024
2025 udelay(10);
2026 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2027 tg3_readphy(tp, MII_BMSR, &tmp))
2028 continue;
2029 if (!(tmp & BMSR_LSTATUS)) {
2030 udelay(40);
2031 break;
2032 }
2033 }
2034 tg3_writephy(tp, MII_BMCR, bmcr);
2035 udelay(40);
2036 }
2037 } else {
2038 tg3_writephy(tp, MII_BMCR,
2039 BMCR_ANENABLE | BMCR_ANRESTART);
2040 }
2041 }
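/* The BMCR_LOOPBACK write in the forced-mode path above is a
 * deliberate trick: putting the PHY into loopback drops the link,
 * and the loop then polls BMSR (1500 iterations of udelay(10),
 * i.e. up to roughly 15 msec) for BMSR_LSTATUS to clear before the
 * real BMCR value is programmed.
 */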
2042
2043 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2044 {
2045 int err;
2046
2047 /* Turn off tap power management. */
2048 /* Set Extended packet length bit */
2049 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2050
2051 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2052 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2053
2054 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2055 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2056
2057 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2058 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2059
2060 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2061 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2062
2063 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2064 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2065
2066 udelay(40);
2067
2068 return err;
2069 }
2070
2071 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2072 {
2073 u32 adv_reg, all_mask = 0;
2074
2075 if (mask & ADVERTISED_10baseT_Half)
2076 all_mask |= ADVERTISE_10HALF;
2077 if (mask & ADVERTISED_10baseT_Full)
2078 all_mask |= ADVERTISE_10FULL;
2079 if (mask & ADVERTISED_100baseT_Half)
2080 all_mask |= ADVERTISE_100HALF;
2081 if (mask & ADVERTISED_100baseT_Full)
2082 all_mask |= ADVERTISE_100FULL;
2083
2084 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2085 return 0;
2086
2087 if ((adv_reg & all_mask) != all_mask)
2088 return 0;
2089 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2090 u32 tg3_ctrl;
2091
2092 all_mask = 0;
2093 if (mask & ADVERTISED_1000baseT_Half)
2094 all_mask |= ADVERTISE_1000HALF;
2095 if (mask & ADVERTISED_1000baseT_Full)
2096 all_mask |= ADVERTISE_1000FULL;
2097
2098 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2099 return 0;
2100
2101 if ((tg3_ctrl & all_mask) != all_mask)
2102 return 0;
2103 }
2104 return 1;
2105 }
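/* Note that this only verifies that everything in "mask" is being
 * advertised; it does not care about extra bits that may also be
 * set.  The autoneg path below uses it as a cheap test for whether
 * renegotiation is needed, roughly:
 *
 *	if ((bmcr & BMCR_ANENABLE) &&
 *	    tg3_copper_is_advertising_all(tp, tp->link_config.advertising))
 *		... current link state can be trusted ...
 *	else
 *		... reprogram via tg3_phy_copper_begin() and relink ...
 */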
2106
2107 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2108 {
2109 u32 curadv, reqadv;
2110
2111 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2112 return 1;
2113
2114 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2115 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2116
2117 if (tp->link_config.active_duplex == DUPLEX_FULL) {
2118 if (curadv != reqadv)
2119 return 0;
2120
2121 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2122 tg3_readphy(tp, MII_LPA, rmtadv);
2123 } else {
2124 /* Reprogram the advertisement register, even if it
2125 * does not affect the current link. If the link
2126 * gets renegotiated in the future, we can save an
2127 * additional renegotiation cycle by advertising
2128 * it correctly in the first place.
2129 */
2130 if (curadv != reqadv) {
2131 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2132 ADVERTISE_PAUSE_ASYM);
2133 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2134 }
2135 }
2136
2137 return 1;
2138 }
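/* Return value convention for tg3_adv_1000T_flowctrl_ok(): 1 means
 * the advertisement is acceptable as-is and the caller may declare
 * the link up; 0 means the pause bits on a full-duplex link disagree
 * with tp->link_config.flowctrl, so the caller leaves the link down
 * and lets autonegotiation run again with the corrected values.
 */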
2139
2140 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2141 {
2142 int current_link_up;
2143 u32 bmsr, dummy;
2144 u32 lcl_adv, rmt_adv;
2145 u16 current_speed;
2146 u8 current_duplex;
2147 int i, err;
2148
2149 tw32(MAC_EVENT, 0);
2150
2151 tw32_f(MAC_STATUS,
2152 (MAC_STATUS_SYNC_CHANGED |
2153 MAC_STATUS_CFG_CHANGED |
2154 MAC_STATUS_MI_COMPLETION |
2155 MAC_STATUS_LNKSTATE_CHANGED));
2156 udelay(40);
2157
2158 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2159 tw32_f(MAC_MI_MODE,
2160 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2161 udelay(80);
2162 }
2163
2164 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2165
2166 /* Some third-party PHYs need to be reset on link going
2167 * down.
2168 */
2169 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2170 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2171 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2172 netif_carrier_ok(tp->dev)) {
2173 tg3_readphy(tp, MII_BMSR, &bmsr);
2174 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2175 !(bmsr & BMSR_LSTATUS))
2176 force_reset = 1;
2177 }
2178 if (force_reset)
2179 tg3_phy_reset(tp);
2180
2181 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2182 tg3_readphy(tp, MII_BMSR, &bmsr);
2183 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2184 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2185 bmsr = 0;
2186
2187 if (!(bmsr & BMSR_LSTATUS)) {
2188 err = tg3_init_5401phy_dsp(tp);
2189 if (err)
2190 return err;
2191
2192 tg3_readphy(tp, MII_BMSR, &bmsr);
2193 for (i = 0; i < 1000; i++) {
2194 udelay(10);
2195 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2196 (bmsr & BMSR_LSTATUS)) {
2197 udelay(40);
2198 break;
2199 }
2200 }
2201
2202 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2203 !(bmsr & BMSR_LSTATUS) &&
2204 tp->link_config.active_speed == SPEED_1000) {
2205 err = tg3_phy_reset(tp);
2206 if (!err)
2207 err = tg3_init_5401phy_dsp(tp);
2208 if (err)
2209 return err;
2210 }
2211 }
2212 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2213 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2214 /* 5701 {A0,B0} CRC bug workaround */
2215 tg3_writephy(tp, 0x15, 0x0a75);
2216 tg3_writephy(tp, 0x1c, 0x8c68);
2217 tg3_writephy(tp, 0x1c, 0x8d68);
2218 tg3_writephy(tp, 0x1c, 0x8c68);
2219 }
2220
2221 /* Clear pending interrupts... */
2222 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2223 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2224
2225 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2226 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
2227 else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
2228 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2229
2230 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2231 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2232 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2233 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2234 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2235 else
2236 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2237 }
2238
2239 current_link_up = 0;
2240 current_speed = SPEED_INVALID;
2241 current_duplex = DUPLEX_INVALID;
2242
2243 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2244 u32 val;
2245
2246 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2247 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2248 if (!(val & (1 << 10))) {
2249 val |= (1 << 10);
2250 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2251 goto relink;
2252 }
2253 }
2254
2255 bmsr = 0;
2256 for (i = 0; i < 100; i++) {
2257 tg3_readphy(tp, MII_BMSR, &bmsr);
2258 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2259 (bmsr & BMSR_LSTATUS))
2260 break;
2261 udelay(40);
2262 }
2263
2264 if (bmsr & BMSR_LSTATUS) {
2265 u32 aux_stat, bmcr;
2266
2267 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2268 for (i = 0; i < 2000; i++) {
2269 udelay(10);
2270 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2271 aux_stat)
2272 break;
2273 }
2274
2275 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2276 &current_speed,
2277 &current_duplex);
2278
2279 bmcr = 0;
2280 for (i = 0; i < 200; i++) {
2281 tg3_readphy(tp, MII_BMCR, &bmcr);
2282 if (tg3_readphy(tp, MII_BMCR, &bmcr))
2283 continue;
2284 if (bmcr && bmcr != 0x7fff)
2285 break;
2286 udelay(10);
2287 }
2288
2289 lcl_adv = 0;
2290 rmt_adv = 0;
2291
2292 tp->link_config.active_speed = current_speed;
2293 tp->link_config.active_duplex = current_duplex;
2294
2295 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2296 if ((bmcr & BMCR_ANENABLE) &&
2297 tg3_copper_is_advertising_all(tp,
2298 tp->link_config.advertising)) {
2299 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2300 &rmt_adv))
2301 current_link_up = 1;
2302 }
2303 } else {
2304 if (!(bmcr & BMCR_ANENABLE) &&
2305 tp->link_config.speed == current_speed &&
2306 tp->link_config.duplex == current_duplex &&
2307 tp->link_config.flowctrl ==
2308 tp->link_config.active_flowctrl) {
2309 current_link_up = 1;
2310 }
2311 }
2312
2313 if (current_link_up == 1 &&
2314 tp->link_config.active_duplex == DUPLEX_FULL)
2315 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2316 }
2317
2318 relink:
2319 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
2320 u32 tmp;
2321
2322 tg3_phy_copper_begin(tp);
2323
2324 tg3_readphy(tp, MII_BMSR, &tmp);
2325 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2326 (tmp & BMSR_LSTATUS))
2327 current_link_up = 1;
2328 }
2329
2330 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2331 if (current_link_up == 1) {
2332 if (tp->link_config.active_speed == SPEED_100 ||
2333 tp->link_config.active_speed == SPEED_10)
2334 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2335 else
2336 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2337 } else
2338 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2339
2340 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2341 if (tp->link_config.active_duplex == DUPLEX_HALF)
2342 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2343
2344 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
2345 if (current_link_up == 1 &&
2346 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
2347 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2348 else
2349 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2350 }
2351
2352 /* ??? Without this setting Netgear GA302T PHY does not
2353 * ??? send/receive packets...
2354 */
2355 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2356 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2357 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2358 tw32_f(MAC_MI_MODE, tp->mi_mode);
2359 udelay(80);
2360 }
2361
2362 tw32_f(MAC_MODE, tp->mac_mode);
2363 udelay(40);
2364
2365 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2366 /* Polled via timer. */
2367 tw32_f(MAC_EVENT, 0);
2368 } else {
2369 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2370 }
2371 udelay(40);
2372
2373 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2374 current_link_up == 1 &&
2375 tp->link_config.active_speed == SPEED_1000 &&
2376 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2377 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2378 udelay(120);
2379 tw32_f(MAC_STATUS,
2380 (MAC_STATUS_SYNC_CHANGED |
2381 MAC_STATUS_CFG_CHANGED));
2382 udelay(40);
2383 tg3_write_mem(tp,
2384 NIC_SRAM_FIRMWARE_MBOX,
2385 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2386 }
2387
2388 if (current_link_up != netif_carrier_ok(tp->dev)) {
2389 if (current_link_up)
2390 netif_carrier_on(tp->dev);
2391 else
2392 netif_carrier_off(tp->dev);
2393 tg3_link_report(tp);
2394 }
2395
2396 return 0;
2397 }
2398
2399 struct tg3_fiber_aneginfo {
2400 int state;
2401 #define ANEG_STATE_UNKNOWN 0
2402 #define ANEG_STATE_AN_ENABLE 1
2403 #define ANEG_STATE_RESTART_INIT 2
2404 #define ANEG_STATE_RESTART 3
2405 #define ANEG_STATE_DISABLE_LINK_OK 4
2406 #define ANEG_STATE_ABILITY_DETECT_INIT 5
2407 #define ANEG_STATE_ABILITY_DETECT 6
2408 #define ANEG_STATE_ACK_DETECT_INIT 7
2409 #define ANEG_STATE_ACK_DETECT 8
2410 #define ANEG_STATE_COMPLETE_ACK_INIT 9
2411 #define ANEG_STATE_COMPLETE_ACK 10
2412 #define ANEG_STATE_IDLE_DETECT_INIT 11
2413 #define ANEG_STATE_IDLE_DETECT 12
2414 #define ANEG_STATE_LINK_OK 13
2415 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
2416 #define ANEG_STATE_NEXT_PAGE_WAIT 15
2417
2418 u32 flags;
2419 #define MR_AN_ENABLE 0x00000001
2420 #define MR_RESTART_AN 0x00000002
2421 #define MR_AN_COMPLETE 0x00000004
2422 #define MR_PAGE_RX 0x00000008
2423 #define MR_NP_LOADED 0x00000010
2424 #define MR_TOGGLE_TX 0x00000020
2425 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
2426 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
2427 #define MR_LP_ADV_SYM_PAUSE 0x00000100
2428 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
2429 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2430 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2431 #define MR_LP_ADV_NEXT_PAGE 0x00001000
2432 #define MR_TOGGLE_RX 0x00002000
2433 #define MR_NP_RX 0x00004000
2434
2435 #define MR_LINK_OK 0x80000000
2436
2437 unsigned long link_time, cur_time;
2438
2439 u32 ability_match_cfg;
2440 int ability_match_count;
2441
2442 char ability_match, idle_match, ack_match;
2443
2444 u32 txconfig, rxconfig;
2445 #define ANEG_CFG_NP 0x00000080
2446 #define ANEG_CFG_ACK 0x00000040
2447 #define ANEG_CFG_RF2 0x00000020
2448 #define ANEG_CFG_RF1 0x00000010
2449 #define ANEG_CFG_PS2 0x00000001
2450 #define ANEG_CFG_PS1 0x00008000
2451 #define ANEG_CFG_HD 0x00004000
2452 #define ANEG_CFG_FD 0x00002000
2453 #define ANEG_CFG_INVAL 0x00001f06
2454
2455 };
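/* The ANEG_CFG_* values above appear to be the IEEE 802.3 clause 37
 * configuration word bits as seen through the MAC_TX_AUTO_NEG /
 * MAC_RX_AUTO_NEG registers, which hold the 16-bit word with its two
 * bytes swapped.  Worked example (on-wire bit -> register value):
 *
 *	FD   0x0020 -> 0x2000      PS2  0x0100 -> 0x0001
 *	HD   0x0040 -> 0x4000      RF1  0x1000 -> 0x0010
 *	PS1  0x0080 -> 0x8000      ACK  0x4000 -> 0x0040
 *
 * This is an observation from the constants themselves, not from
 * hardware documentation.
 */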
2456 #define ANEG_OK 0
2457 #define ANEG_DONE 1
2458 #define ANEG_TIMER_ENAB 2
2459 #define ANEG_FAILED -1
2460
2461 #define ANEG_STATE_SETTLE_TIME 10000
2462
2463 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2464 struct tg3_fiber_aneginfo *ap)
2465 {
2466 u16 flowctrl;
2467 unsigned long delta;
2468 u32 rx_cfg_reg;
2469 int ret;
2470
2471 if (ap->state == ANEG_STATE_UNKNOWN) {
2472 ap->rxconfig = 0;
2473 ap->link_time = 0;
2474 ap->cur_time = 0;
2475 ap->ability_match_cfg = 0;
2476 ap->ability_match_count = 0;
2477 ap->ability_match = 0;
2478 ap->idle_match = 0;
2479 ap->ack_match = 0;
2480 }
2481 ap->cur_time++;
2482
2483 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2484 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2485
2486 if (rx_cfg_reg != ap->ability_match_cfg) {
2487 ap->ability_match_cfg = rx_cfg_reg;
2488 ap->ability_match = 0;
2489 ap->ability_match_count = 0;
2490 } else {
2491 if (++ap->ability_match_count > 1) {
2492 ap->ability_match = 1;
2493 ap->ability_match_cfg = rx_cfg_reg;
2494 }
2495 }
2496 if (rx_cfg_reg & ANEG_CFG_ACK)
2497 ap->ack_match = 1;
2498 else
2499 ap->ack_match = 0;
2500
2501 ap->idle_match = 0;
2502 } else {
2503 ap->idle_match = 1;
2504 ap->ability_match_cfg = 0;
2505 ap->ability_match_count = 0;
2506 ap->ability_match = 0;
2507 ap->ack_match = 0;
2508
2509 rx_cfg_reg = 0;
2510 }
2511
2512 ap->rxconfig = rx_cfg_reg;
2513 ret = ANEG_OK;
2514
2515 switch (ap->state) {
2516 case ANEG_STATE_UNKNOWN:
2517 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2518 ap->state = ANEG_STATE_AN_ENABLE;
2519
2520 /* fallthru */
2521 case ANEG_STATE_AN_ENABLE:
2522 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2523 if (ap->flags & MR_AN_ENABLE) {
2524 ap->link_time = 0;
2525 ap->cur_time = 0;
2526 ap->ability_match_cfg = 0;
2527 ap->ability_match_count = 0;
2528 ap->ability_match = 0;
2529 ap->idle_match = 0;
2530 ap->ack_match = 0;
2531
2532 ap->state = ANEG_STATE_RESTART_INIT;
2533 } else {
2534 ap->state = ANEG_STATE_DISABLE_LINK_OK;
2535 }
2536 break;
2537
2538 case ANEG_STATE_RESTART_INIT:
2539 ap->link_time = ap->cur_time;
2540 ap->flags &= ~(MR_NP_LOADED);
2541 ap->txconfig = 0;
2542 tw32(MAC_TX_AUTO_NEG, 0);
2543 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2544 tw32_f(MAC_MODE, tp->mac_mode);
2545 udelay(40);
2546
2547 ret = ANEG_TIMER_ENAB;
2548 ap->state = ANEG_STATE_RESTART;
2549
2550 /* fallthru */
2551 case ANEG_STATE_RESTART:
2552 delta = ap->cur_time - ap->link_time;
2553 if (delta > ANEG_STATE_SETTLE_TIME) {
2554 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2555 } else {
2556 ret = ANEG_TIMER_ENAB;
2557 }
2558 break;
2559
2560 case ANEG_STATE_DISABLE_LINK_OK:
2561 ret = ANEG_DONE;
2562 break;
2563
2564 case ANEG_STATE_ABILITY_DETECT_INIT:
2565 ap->flags &= ~(MR_TOGGLE_TX);
2566 ap->txconfig = ANEG_CFG_FD;
2567 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
2568 if (flowctrl & ADVERTISE_1000XPAUSE)
2569 ap->txconfig |= ANEG_CFG_PS1;
2570 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
2571 ap->txconfig |= ANEG_CFG_PS2;
2572 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2573 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2574 tw32_f(MAC_MODE, tp->mac_mode);
2575 udelay(40);
2576
2577 ap->state = ANEG_STATE_ABILITY_DETECT;
2578 break;
2579
2580 case ANEG_STATE_ABILITY_DETECT:
2581 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2582 ap->state = ANEG_STATE_ACK_DETECT_INIT;
2583 }
2584 break;
2585
2586 case ANEG_STATE_ACK_DETECT_INIT:
2587 ap->txconfig |= ANEG_CFG_ACK;
2588 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2589 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2590 tw32_f(MAC_MODE, tp->mac_mode);
2591 udelay(40);
2592
2593 ap->state = ANEG_STATE_ACK_DETECT;
2594
2595 /* fallthru */
2596 case ANEG_STATE_ACK_DETECT:
2597 if (ap->ack_match != 0) {
2598 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2599 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2600 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2601 } else {
2602 ap->state = ANEG_STATE_AN_ENABLE;
2603 }
2604 } else if (ap->ability_match != 0 &&
2605 ap->rxconfig == 0) {
2606 ap->state = ANEG_STATE_AN_ENABLE;
2607 }
2608 break;
2609
2610 case ANEG_STATE_COMPLETE_ACK_INIT:
2611 if (ap->rxconfig & ANEG_CFG_INVAL) {
2612 ret = ANEG_FAILED;
2613 break;
2614 }
2615 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2616 MR_LP_ADV_HALF_DUPLEX |
2617 MR_LP_ADV_SYM_PAUSE |
2618 MR_LP_ADV_ASYM_PAUSE |
2619 MR_LP_ADV_REMOTE_FAULT1 |
2620 MR_LP_ADV_REMOTE_FAULT2 |
2621 MR_LP_ADV_NEXT_PAGE |
2622 MR_TOGGLE_RX |
2623 MR_NP_RX);
2624 if (ap->rxconfig & ANEG_CFG_FD)
2625 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2626 if (ap->rxconfig & ANEG_CFG_HD)
2627 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2628 if (ap->rxconfig & ANEG_CFG_PS1)
2629 ap->flags |= MR_LP_ADV_SYM_PAUSE;
2630 if (ap->rxconfig & ANEG_CFG_PS2)
2631 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2632 if (ap->rxconfig & ANEG_CFG_RF1)
2633 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2634 if (ap->rxconfig & ANEG_CFG_RF2)
2635 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2636 if (ap->rxconfig & ANEG_CFG_NP)
2637 ap->flags |= MR_LP_ADV_NEXT_PAGE;
2638
2639 ap->link_time = ap->cur_time;
2640
2641 ap->flags ^= (MR_TOGGLE_TX);
2642 if (ap->rxconfig & 0x0008)
2643 ap->flags |= MR_TOGGLE_RX;
2644 if (ap->rxconfig & ANEG_CFG_NP)
2645 ap->flags |= MR_NP_RX;
2646 ap->flags |= MR_PAGE_RX;
2647
2648 ap->state = ANEG_STATE_COMPLETE_ACK;
2649 ret = ANEG_TIMER_ENAB;
2650 break;
2651
2652 case ANEG_STATE_COMPLETE_ACK:
2653 if (ap->ability_match != 0 &&
2654 ap->rxconfig == 0) {
2655 ap->state = ANEG_STATE_AN_ENABLE;
2656 break;
2657 }
2658 delta = ap->cur_time - ap->link_time;
2659 if (delta > ANEG_STATE_SETTLE_TIME) {
2660 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2661 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2662 } else {
2663 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2664 !(ap->flags & MR_NP_RX)) {
2665 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2666 } else {
2667 ret = ANEG_FAILED;
2668 }
2669 }
2670 }
2671 break;
2672
2673 case ANEG_STATE_IDLE_DETECT_INIT:
2674 ap->link_time = ap->cur_time;
2675 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2676 tw32_f(MAC_MODE, tp->mac_mode);
2677 udelay(40);
2678
2679 ap->state = ANEG_STATE_IDLE_DETECT;
2680 ret = ANEG_TIMER_ENAB;
2681 break;
2682
2683 case ANEG_STATE_IDLE_DETECT:
2684 if (ap->ability_match != 0 &&
2685 ap->rxconfig == 0) {
2686 ap->state = ANEG_STATE_AN_ENABLE;
2687 break;
2688 }
2689 delta = ap->cur_time - ap->link_time;
2690 if (delta > ANEG_STATE_SETTLE_TIME) {
2691 /* XXX another gem from the Broadcom driver :( */
2692 ap->state = ANEG_STATE_LINK_OK;
2693 }
2694 break;
2695
2696 case ANEG_STATE_LINK_OK:
2697 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2698 ret = ANEG_DONE;
2699 break;
2700
2701 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2702 /* ??? unimplemented */
2703 break;
2704
2705 case ANEG_STATE_NEXT_PAGE_WAIT:
2706 /* ??? unimplemented */
2707 break;
2708
2709 default:
2710 ret = ANEG_FAILED;
2711 break;
2712 }
2713
2714 return ret;
2715 }
2716
2717 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
2718 {
2719 int res = 0;
2720 struct tg3_fiber_aneginfo aninfo;
2721 int status = ANEG_FAILED;
2722 unsigned int tick;
2723 u32 tmp;
2724
2725 tw32_f(MAC_TX_AUTO_NEG, 0);
2726
2727 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2728 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2729 udelay(40);
2730
2731 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2732 udelay(40);
2733
2734 memset(&aninfo, 0, sizeof(aninfo));
2735 aninfo.flags |= MR_AN_ENABLE;
2736 aninfo.state = ANEG_STATE_UNKNOWN;
2737 aninfo.cur_time = 0;
2738 tick = 0;
2739 while (++tick < 195000) {
2740 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2741 if (status == ANEG_DONE || status == ANEG_FAILED)
2742 break;
2743
2744 udelay(1);
2745 }
2746
2747 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2748 tw32_f(MAC_MODE, tp->mac_mode);
2749 udelay(40);
2750
2751 *txflags = aninfo.txconfig;
2752 *rxflags = aninfo.flags;
2753
2754 if (status == ANEG_DONE &&
2755 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2756 MR_LP_ADV_FULL_DUPLEX)))
2757 res = 1;
2758
2759 return res;
2760 }
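/* A note on the timing above: each pass through the while loop is
 * one "tick" of roughly 1 usec (udelay(1)), so the 195000-tick bound
 * gives the state machine on the order of 195 msec to converge, and
 * ANEG_STATE_SETTLE_TIME (10000 ticks) corresponds to roughly 10 msec
 * of link settle time.  These are approximations; udelay() only
 * guarantees a minimum delay.
 */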
2761
2762 static void tg3_init_bcm8002(struct tg3 *tp)
2763 {
2764 u32 mac_status = tr32(MAC_STATUS);
2765 int i;
2766
2767 /* Reset when initting first time or we have a link. */
2768 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2769 !(mac_status & MAC_STATUS_PCS_SYNCED))
2770 return;
2771
2772 /* Set PLL lock range. */
2773 tg3_writephy(tp, 0x16, 0x8007);
2774
2775 /* SW reset */
2776 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2777
2778 /* Wait for reset to complete. */
2779 /* XXX schedule_timeout() ... */
2780 for (i = 0; i < 500; i++)
2781 udelay(10);
2782
2783 /* Config mode; select PMA/Ch 1 regs. */
2784 tg3_writephy(tp, 0x10, 0x8411);
2785
2786 /* Enable auto-lock and comdet, select txclk for tx. */
2787 tg3_writephy(tp, 0x11, 0x0a10);
2788
2789 tg3_writephy(tp, 0x18, 0x00a0);
2790 tg3_writephy(tp, 0x16, 0x41ff);
2791
2792 /* Assert and deassert POR. */
2793 tg3_writephy(tp, 0x13, 0x0400);
2794 udelay(40);
2795 tg3_writephy(tp, 0x13, 0x0000);
2796
2797 tg3_writephy(tp, 0x11, 0x0a50);
2798 udelay(40);
2799 tg3_writephy(tp, 0x11, 0x0a10);
2800
2801 /* Wait for signal to stabilize */
2802 /* XXX schedule_timeout() ... */
2803 for (i = 0; i < 15000; i++)
2804 udelay(10);
2805
2806 /* Deselect the channel register so we can read the PHYID
2807 * later.
2808 */
2809 tg3_writephy(tp, 0x10, 0x8011);
2810 }
2811
2812 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2813 {
2814 u16 flowctrl;
2815 u32 sg_dig_ctrl, sg_dig_status;
2816 u32 serdes_cfg, expected_sg_dig_ctrl;
2817 int workaround, port_a;
2818 int current_link_up;
2819
2820 serdes_cfg = 0;
2821 expected_sg_dig_ctrl = 0;
2822 workaround = 0;
2823 port_a = 1;
2824 current_link_up = 0;
2825
2826 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2827 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2828 workaround = 1;
2829 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2830 port_a = 0;
2831
2832 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2833 /* preserve bits 20-23 for voltage regulator */
2834 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2835 }
2836
2837 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2838
2839 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2840 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
2841 if (workaround) {
2842 u32 val = serdes_cfg;
2843
2844 if (port_a)
2845 val |= 0xc010000;
2846 else
2847 val |= 0x4010000;
2848 tw32_f(MAC_SERDES_CFG, val);
2849 }
2850
2851 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
2852 }
2853 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2854 tg3_setup_flow_control(tp, 0, 0);
2855 current_link_up = 1;
2856 }
2857 goto out;
2858 }
2859
2860 /* Want auto-negotiation. */
2861 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
2862
2863 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
2864 if (flowctrl & ADVERTISE_1000XPAUSE)
2865 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
2866 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
2867 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
2868
2869 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2870 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
2871 tp->serdes_counter &&
2872 ((mac_status & (MAC_STATUS_PCS_SYNCED |
2873 MAC_STATUS_RCVD_CFG)) ==
2874 MAC_STATUS_PCS_SYNCED)) {
2875 tp->serdes_counter--;
2876 current_link_up = 1;
2877 goto out;
2878 }
2879 restart_autoneg:
2880 if (workaround)
2881 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2882 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
2883 udelay(5);
2884 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2885
2886 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2887 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2888 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2889 MAC_STATUS_SIGNAL_DET)) {
2890 sg_dig_status = tr32(SG_DIG_STATUS);
2891 mac_status = tr32(MAC_STATUS);
2892
2893 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
2894 (mac_status & MAC_STATUS_PCS_SYNCED)) {
2895 u32 local_adv = 0, remote_adv = 0;
2896
2897 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
2898 local_adv |= ADVERTISE_1000XPAUSE;
2899 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
2900 local_adv |= ADVERTISE_1000XPSE_ASYM;
2901
2902 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
2903 remote_adv |= LPA_1000XPAUSE;
2904 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
2905 remote_adv |= LPA_1000XPAUSE_ASYM;
2906
2907 tg3_setup_flow_control(tp, local_adv, remote_adv);
2908 current_link_up = 1;
2909 tp->serdes_counter = 0;
2910 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2911 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
2912 if (tp->serdes_counter)
2913 tp->serdes_counter--;
2914 else {
2915 if (workaround) {
2916 u32 val = serdes_cfg;
2917
2918 if (port_a)
2919 val |= 0xc010000;
2920 else
2921 val |= 0x4010000;
2922
2923 tw32_f(MAC_SERDES_CFG, val);
2924 }
2925
2926 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
2927 udelay(40);
2928
2929 /* Link parallel detection - link is up only if
2930 * we have PCS_SYNC and are not receiving
2931 * config code words. */
2932 mac_status = tr32(MAC_STATUS);
2933 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2934 !(mac_status & MAC_STATUS_RCVD_CFG)) {
2935 tg3_setup_flow_control(tp, 0, 0);
2936 current_link_up = 1;
2937 tp->tg3_flags2 |=
2938 TG3_FLG2_PARALLEL_DETECT;
2939 tp->serdes_counter =
2940 SERDES_PARALLEL_DET_TIMEOUT;
2941 } else
2942 goto restart_autoneg;
2943 }
2944 }
2945 } else {
2946 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2947 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2948 }
2949
2950 out:
2951 return current_link_up;
2952 }
2953
2954 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2955 {
2956 int current_link_up = 0;
2957
2958 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
2959 goto out;
2960
2961 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2962 u32 txflags, rxflags;
2963 int i;
2964
2965 if (fiber_autoneg(tp, &txflags, &rxflags)) {
2966 u32 local_adv = 0, remote_adv = 0;
2967
2968 if (txflags & ANEG_CFG_PS1)
2969 local_adv |= ADVERTISE_1000XPAUSE;
2970 if (txflags & ANEG_CFG_PS2)
2971 local_adv |= ADVERTISE_1000XPSE_ASYM;
2972
2973 if (rxflags & MR_LP_ADV_SYM_PAUSE)
2974 remote_adv |= LPA_1000XPAUSE;
2975 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
2976 remote_adv |= LPA_1000XPAUSE_ASYM;
2977
2978 tg3_setup_flow_control(tp, local_adv, remote_adv);
2979
2980 current_link_up = 1;
2981 }
2982 for (i = 0; i < 30; i++) {
2983 udelay(20);
2984 tw32_f(MAC_STATUS,
2985 (MAC_STATUS_SYNC_CHANGED |
2986 MAC_STATUS_CFG_CHANGED));
2987 udelay(40);
2988 if ((tr32(MAC_STATUS) &
2989 (MAC_STATUS_SYNC_CHANGED |
2990 MAC_STATUS_CFG_CHANGED)) == 0)
2991 break;
2992 }
2993
2994 mac_status = tr32(MAC_STATUS);
2995 if (current_link_up == 0 &&
2996 (mac_status & MAC_STATUS_PCS_SYNCED) &&
2997 !(mac_status & MAC_STATUS_RCVD_CFG))
2998 current_link_up = 1;
2999 } else {
3000 tg3_setup_flow_control(tp, 0, 0);
3001
3002 /* Forcing 1000FD link up. */
3003 current_link_up = 1;
3004
3005 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3006 udelay(40);
3007
3008 tw32_f(MAC_MODE, tp->mac_mode);
3009 udelay(40);
3010 }
3011
3012 out:
3013 return current_link_up;
3014 }
3015
3016 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3017 {
3018 u32 orig_pause_cfg;
3019 u16 orig_active_speed;
3020 u8 orig_active_duplex;
3021 u32 mac_status;
3022 int current_link_up;
3023 int i;
3024
3025 orig_pause_cfg = tp->link_config.active_flowctrl;
3026 orig_active_speed = tp->link_config.active_speed;
3027 orig_active_duplex = tp->link_config.active_duplex;
3028
3029 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3030 netif_carrier_ok(tp->dev) &&
3031 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3032 mac_status = tr32(MAC_STATUS);
3033 mac_status &= (MAC_STATUS_PCS_SYNCED |
3034 MAC_STATUS_SIGNAL_DET |
3035 MAC_STATUS_CFG_CHANGED |
3036 MAC_STATUS_RCVD_CFG);
3037 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3038 MAC_STATUS_SIGNAL_DET)) {
3039 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3040 MAC_STATUS_CFG_CHANGED));
3041 return 0;
3042 }
3043 }
3044
3045 tw32_f(MAC_TX_AUTO_NEG, 0);
3046
3047 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3048 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3049 tw32_f(MAC_MODE, tp->mac_mode);
3050 udelay(40);
3051
3052 if (tp->phy_id == PHY_ID_BCM8002)
3053 tg3_init_bcm8002(tp);
3054
3055 /* Enable link change event even when serdes polling. */
3056 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3057 udelay(40);
3058
3059 current_link_up = 0;
3060 mac_status = tr32(MAC_STATUS);
3061
3062 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3063 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3064 else
3065 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3066
3067 tp->hw_status->status =
3068 (SD_STATUS_UPDATED |
3069 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
3070
3071 for (i = 0; i < 100; i++) {
3072 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3073 MAC_STATUS_CFG_CHANGED));
3074 udelay(5);
3075 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3076 MAC_STATUS_CFG_CHANGED |
3077 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3078 break;
3079 }
3080
3081 mac_status = tr32(MAC_STATUS);
3082 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3083 current_link_up = 0;
3084 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3085 tp->serdes_counter == 0) {
3086 tw32_f(MAC_MODE, (tp->mac_mode |
3087 MAC_MODE_SEND_CONFIGS));
3088 udelay(1);
3089 tw32_f(MAC_MODE, tp->mac_mode);
3090 }
3091 }
3092
3093 if (current_link_up == 1) {
3094 tp->link_config.active_speed = SPEED_1000;
3095 tp->link_config.active_duplex = DUPLEX_FULL;
3096 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3097 LED_CTRL_LNKLED_OVERRIDE |
3098 LED_CTRL_1000MBPS_ON));
3099 } else {
3100 tp->link_config.active_speed = SPEED_INVALID;
3101 tp->link_config.active_duplex = DUPLEX_INVALID;
3102 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3103 LED_CTRL_LNKLED_OVERRIDE |
3104 LED_CTRL_TRAFFIC_OVERRIDE));
3105 }
3106
3107 if (current_link_up != netif_carrier_ok(tp->dev)) {
3108 if (current_link_up)
3109 netif_carrier_on(tp->dev);
3110 else
3111 netif_carrier_off(tp->dev);
3112 tg3_link_report(tp);
3113 } else {
3114 u32 now_pause_cfg = tp->link_config.active_flowctrl;
3115 if (orig_pause_cfg != now_pause_cfg ||
3116 orig_active_speed != tp->link_config.active_speed ||
3117 orig_active_duplex != tp->link_config.active_duplex)
3118 tg3_link_report(tp);
3119 }
3120
3121 return 0;
3122 }
3123
3124 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3125 {
3126 int current_link_up, err = 0;
3127 u32 bmsr, bmcr;
3128 u16 current_speed;
3129 u8 current_duplex;
3130 u32 local_adv, remote_adv;
3131
3132 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3133 tw32_f(MAC_MODE, tp->mac_mode);
3134 udelay(40);
3135
3136 tw32(MAC_EVENT, 0);
3137
3138 tw32_f(MAC_STATUS,
3139 (MAC_STATUS_SYNC_CHANGED |
3140 MAC_STATUS_CFG_CHANGED |
3141 MAC_STATUS_MI_COMPLETION |
3142 MAC_STATUS_LNKSTATE_CHANGED));
3143 udelay(40);
3144
3145 if (force_reset)
3146 tg3_phy_reset(tp);
3147
3148 current_link_up = 0;
3149 current_speed = SPEED_INVALID;
3150 current_duplex = DUPLEX_INVALID;
3151
3152 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3153 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3154 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3155 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3156 bmsr |= BMSR_LSTATUS;
3157 else
3158 bmsr &= ~BMSR_LSTATUS;
3159 }
3160
3161 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3162
3163 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
3164 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3165 tp->link_config.flowctrl == tp->link_config.active_flowctrl) {
3166 /* do nothing, just check for link up at the end */
3167 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3168 u32 adv, new_adv;
3169
3170 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3171 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3172 ADVERTISE_1000XPAUSE |
3173 ADVERTISE_1000XPSE_ASYM |
3174 ADVERTISE_SLCT);
3175
3176 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3177
3178 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3179 new_adv |= ADVERTISE_1000XHALF;
3180 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3181 new_adv |= ADVERTISE_1000XFULL;
3182
3183 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3184 tg3_writephy(tp, MII_ADVERTISE, new_adv);
3185 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3186 tg3_writephy(tp, MII_BMCR, bmcr);
3187
3188 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3189 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
3190 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3191
3192 return err;
3193 }
3194 } else {
3195 u32 new_bmcr;
3196
3197 bmcr &= ~BMCR_SPEED1000;
3198 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3199
3200 if (tp->link_config.duplex == DUPLEX_FULL)
3201 new_bmcr |= BMCR_FULLDPLX;
3202
3203 if (new_bmcr != bmcr) {
3204 /* BMCR_SPEED1000 is a reserved bit that needs
3205 * to be set on write.
3206 */
3207 new_bmcr |= BMCR_SPEED1000;
3208
3209 /* Force a linkdown */
3210 if (netif_carrier_ok(tp->dev)) {
3211 u32 adv;
3212
3213 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3214 adv &= ~(ADVERTISE_1000XFULL |
3215 ADVERTISE_1000XHALF |
3216 ADVERTISE_SLCT);
3217 tg3_writephy(tp, MII_ADVERTISE, adv);
3218 tg3_writephy(tp, MII_BMCR, bmcr |
3219 BMCR_ANRESTART |
3220 BMCR_ANENABLE);
3221 udelay(10);
3222 netif_carrier_off(tp->dev);
3223 }
3224 tg3_writephy(tp, MII_BMCR, new_bmcr);
3225 bmcr = new_bmcr;
3226 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3227 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3228 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3229 ASIC_REV_5714) {
3230 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3231 bmsr |= BMSR_LSTATUS;
3232 else
3233 bmsr &= ~BMSR_LSTATUS;
3234 }
3235 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3236 }
3237 }
3238
3239 if (bmsr & BMSR_LSTATUS) {
3240 current_speed = SPEED_1000;
3241 current_link_up = 1;
3242 if (bmcr & BMCR_FULLDPLX)
3243 current_duplex = DUPLEX_FULL;
3244 else
3245 current_duplex = DUPLEX_HALF;
3246
3247 local_adv = 0;
3248 remote_adv = 0;
3249
3250 if (bmcr & BMCR_ANENABLE) {
3251 u32 common;
3252
3253 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3254 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3255 common = local_adv & remote_adv;
3256 if (common & (ADVERTISE_1000XHALF |
3257 ADVERTISE_1000XFULL)) {
3258 if (common & ADVERTISE_1000XFULL)
3259 current_duplex = DUPLEX_FULL;
3260 else
3261 current_duplex = DUPLEX_HALF;
3262 } else
3264 current_link_up = 0;
3265 }
3266 }
3267
3268 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
3269 tg3_setup_flow_control(tp, local_adv, remote_adv);
3270
3271 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3272 if (tp->link_config.active_duplex == DUPLEX_HALF)
3273 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3274
3275 tw32_f(MAC_MODE, tp->mac_mode);
3276 udelay(40);
3277
3278 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3279
3280 tp->link_config.active_speed = current_speed;
3281 tp->link_config.active_duplex = current_duplex;
3282
3283 if (current_link_up != netif_carrier_ok(tp->dev)) {
3284 if (current_link_up)
3285 netif_carrier_on(tp->dev);
3286 else {
3287 netif_carrier_off(tp->dev);
3288 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3289 }
3290 tg3_link_report(tp);
3291 }
3292 return err;
3293 }
3294
3295 static void tg3_serdes_parallel_detect(struct tg3 *tp)
3296 {
3297 if (tp->serdes_counter) {
3298 /* Give autoneg time to complete. */
3299 tp->serdes_counter--;
3300 return;
3301 }
3302 if (!netif_carrier_ok(tp->dev) &&
3303 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3304 u32 bmcr;
3305
3306 tg3_readphy(tp, MII_BMCR, &bmcr);
3307 if (bmcr & BMCR_ANENABLE) {
3308 u32 phy1, phy2;
3309
3310 /* Select shadow register 0x1f */
3311 tg3_writephy(tp, 0x1c, 0x7c00);
3312 tg3_readphy(tp, 0x1c, &phy1);
3313
3314 /* Select expansion interrupt status register */
3315 tg3_writephy(tp, 0x17, 0x0f01);
3316 tg3_readphy(tp, 0x15, &phy2);
3317 tg3_readphy(tp, 0x15, &phy2);
3318
3319 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3320 /* We have signal detect and not receiving
3321 * config code words, link is up by parallel
3322 * detection.
3323 */
3324
3325 bmcr &= ~BMCR_ANENABLE;
3326 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3327 tg3_writephy(tp, MII_BMCR, bmcr);
3328 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3329 }
3330 }
3331 } else if (netif_carrier_ok(tp->dev) &&
3333 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3334 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3335 u32 phy2;
3336
3337 /* Select expansion interrupt status register */
3338 tg3_writephy(tp, 0x17, 0x0f01);
3339 tg3_readphy(tp, 0x15, &phy2);
3340 if (phy2 & 0x20) {
3341 u32 bmcr;
3342
3343 /* Config code words received, turn on autoneg. */
3344 tg3_readphy(tp, MII_BMCR, &bmcr);
3345 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3346
3347 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3349 }
3350 }
3351 }
3352
3353 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3354 {
3355 int err;
3356
3357 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3358 err = tg3_setup_fiber_phy(tp, force_reset);
3359 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3360 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3361 } else {
3362 err = tg3_setup_copper_phy(tp, force_reset);
3363 }
3364
3365 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
3366 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
3367 u32 val, scale;
3368
3369 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3370 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3371 scale = 65;
3372 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3373 scale = 6;
3374 else
3375 scale = 12;
3376
3377 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3378 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3379 tw32(GRC_MISC_CFG, val);
3380 }
3381
3382 if (tp->link_config.active_speed == SPEED_1000 &&
3383 tp->link_config.active_duplex == DUPLEX_HALF)
3384 tw32(MAC_TX_LENGTHS,
3385 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3386 (6 << TX_LENGTHS_IPG_SHIFT) |
3387 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3388 else
3389 tw32(MAC_TX_LENGTHS,
3390 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3391 (6 << TX_LENGTHS_IPG_SHIFT) |
3392 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3393
3394 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3395 if (netif_carrier_ok(tp->dev)) {
3396 tw32(HOSTCC_STAT_COAL_TICKS,
3397 tp->coal.stats_block_coalesce_usecs);
3398 } else {
3399 tw32(HOSTCC_STAT_COAL_TICKS, 0);
3400 }
3401 }
3402
3403 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3404 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3405 if (!netif_carrier_ok(tp->dev))
3406 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3407 tp->pwrmgmt_thresh;
3408 else
3409 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3410 tw32(PCIE_PWR_MGMT_THRESH, val);
3411 }
3412
3413 return err;
3414 }
3415
3416 /* This is called whenever we suspect that the system chipset is re-
3417 * ordering the sequence of MMIO to the tx send mailbox. The symptom
3418 * is bogus tx completions. We try to recover by setting the
3419 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3420 * in the workqueue.
3421 */
3422 static void tg3_tx_recover(struct tg3 *tp)
3423 {
3424 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3425 tp->write32_tx_mbox == tg3_write_indirect_mbox);
3426
3427 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3428 "mapped I/O cycles to the network device, attempting to "
3429 "recover. Please report the problem to the driver maintainer "
3430 "and include system chipset information.\n", tp->dev->name);
3431
3432 spin_lock(&tp->lock);
3433 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
3434 spin_unlock(&tp->lock);
3435 }
3436
3437 static inline u32 tg3_tx_avail(struct tg3 *tp)
3438 {
3439 smp_mb();
3440 return (tp->tx_pending -
3441 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3442 }
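/* Worked example of the ring arithmetic above, with
 * TG3_TX_RING_SIZE = 512: if tx_prod = 5 and tx_cons = 510, then
 * (5 - 510) & 511 = 7 descriptors are still in flight, so
 * tg3_tx_avail() returns tx_pending - 7.  The mask trick only works
 * because the ring size is a power of two.
 */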
3443
3444 /* Tigon3 never reports partial packet sends. So we do not
3445 * need special logic to handle SKBs that have not had all
3446 * of their frags sent yet, like SunGEM does.
3447 */
3448 static void tg3_tx(struct tg3 *tp)
3449 {
3450 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3451 u32 sw_idx = tp->tx_cons;
3452
3453 while (sw_idx != hw_idx) {
3454 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3455 struct sk_buff *skb = ri->skb;
3456 int i, tx_bug = 0;
3457
3458 if (unlikely(skb == NULL)) {
3459 tg3_tx_recover(tp);
3460 return;
3461 }
3462
3463 pci_unmap_single(tp->pdev,
3464 pci_unmap_addr(ri, mapping),
3465 skb_headlen(skb),
3466 PCI_DMA_TODEVICE);
3467
3468 ri->skb = NULL;
3469
3470 sw_idx = NEXT_TX(sw_idx);
3471
3472 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3473 ri = &tp->tx_buffers[sw_idx];
3474 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3475 tx_bug = 1;
3476
3477 pci_unmap_page(tp->pdev,
3478 pci_unmap_addr(ri, mapping),
3479 skb_shinfo(skb)->frags[i].size,
3480 PCI_DMA_TODEVICE);
3481
3482 sw_idx = NEXT_TX(sw_idx);
3483 }
3484
3485 dev_kfree_skb(skb);
3486
3487 if (unlikely(tx_bug)) {
3488 tg3_tx_recover(tp);
3489 return;
3490 }
3491 }
3492
3493 tp->tx_cons = sw_idx;
3494
3495 /* Need to make the tx_cons update visible to tg3_start_xmit()
3496 * before checking for netif_queue_stopped(). Without the
3497 * memory barrier, there is a small possibility that tg3_start_xmit()
3498 * will miss it and cause the queue to be stopped forever.
3499 */
3500 smp_mb();
3501
3502 if (unlikely(netif_queue_stopped(tp->dev) &&
3503 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
3504 netif_tx_lock(tp->dev);
3505 if (netif_queue_stopped(tp->dev) &&
3506 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
3507 netif_wake_queue(tp->dev);
3508 netif_tx_unlock(tp->dev);
3509 }
3510 }
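/* Why the smp_mb() above matters: the tp->tx_cons update must be
 * globally visible before the netif_queue_stopped() test.  An
 * illustrative (not observed) interleaving without it:
 *
 *	tg3_tx()                        tg3_start_xmit()
 *	  tp->tx_cons = sw_idx;           ring looks full, stops queue
 *	  queue not stopped yet -> no wake
 *	                                  queue now stopped, forever
 *
 * The producer side is assumed to have a matching ordering point
 * before it stops the queue.
 */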
3511
3512 /* Returns size of skb allocated or < 0 on error.
3513 *
3514 * We only need to fill in the address because the other members
3515 * of the RX descriptor are invariant, see tg3_init_rings.
3516 *
3517 * Note the purposeful asymmetry of cpu vs. chip accesses. For
3518 * posting buffers we only dirty the first cache line of the RX
3519 * descriptor (containing the address). Whereas for the RX status
3520 * buffers the cpu only reads the last cacheline of the RX descriptor
3521 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3522 */
3523 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3524 int src_idx, u32 dest_idx_unmasked)
3525 {
3526 struct tg3_rx_buffer_desc *desc;
3527 struct ring_info *map, *src_map;
3528 struct sk_buff *skb;
3529 dma_addr_t mapping;
3530 int skb_size, dest_idx;
3531
3532 src_map = NULL;
3533 switch (opaque_key) {
3534 case RXD_OPAQUE_RING_STD:
3535 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3536 desc = &tp->rx_std[dest_idx];
3537 map = &tp->rx_std_buffers[dest_idx];
3538 if (src_idx >= 0)
3539 src_map = &tp->rx_std_buffers[src_idx];
3540 skb_size = tp->rx_pkt_buf_sz;
3541 break;
3542
3543 case RXD_OPAQUE_RING_JUMBO:
3544 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3545 desc = &tp->rx_jumbo[dest_idx];
3546 map = &tp->rx_jumbo_buffers[dest_idx];
3547 if (src_idx >= 0)
3548 src_map = &tp->rx_jumbo_buffers[src_idx];
3549 skb_size = RX_JUMBO_PKT_BUF_SZ;
3550 break;
3551
3552 default:
3553 return -EINVAL;
3554 }
3555
3556 /* Do not overwrite any of the map or rp information
3557 * until we are sure we can commit to a new buffer.
3558 *
3559 * Callers depend upon this behavior and assume that
3560 * we leave everything unchanged if we fail.
3561 */
3562 skb = netdev_alloc_skb(tp->dev, skb_size);
3563 if (skb == NULL)
3564 return -ENOMEM;
3565
3566 skb_reserve(skb, tp->rx_offset);
3567
3568 mapping = pci_map_single(tp->pdev, skb->data,
3569 skb_size - tp->rx_offset,
3570 PCI_DMA_FROMDEVICE);
3571
3572 map->skb = skb;
3573 pci_unmap_addr_set(map, mapping, mapping);
3574
3575 if (src_map != NULL)
3576 src_map->skb = NULL;
3577
3578 desc->addr_hi = ((u64)mapping >> 32);
3579 desc->addr_lo = ((u64)mapping & 0xffffffff);
3580
3581 return skb_size;
3582 }
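/* The 64-bit DMA address is split across two 32-bit descriptor
 * fields as shown above.  For example, a mapping of
 * 0x0000000123456780 is stored as addr_hi = 0x00000001 and
 * addr_lo = 0x23456780.  Only these two words are dirtied when
 * posting a buffer; the rest of the descriptor was set up once in
 * tg3_init_rings() and never changes.
 */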
3583
3584 /* We only need to move over in the address because the other
3585 * members of the RX descriptor are invariant. See notes above
3586 * tg3_alloc_rx_skb for full details.
3587 */
3588 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3589 int src_idx, u32 dest_idx_unmasked)
3590 {
3591 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3592 struct ring_info *src_map, *dest_map;
3593 int dest_idx;
3594
3595 switch (opaque_key) {
3596 case RXD_OPAQUE_RING_STD:
3597 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3598 dest_desc = &tp->rx_std[dest_idx];
3599 dest_map = &tp->rx_std_buffers[dest_idx];
3600 src_desc = &tp->rx_std[src_idx];
3601 src_map = &tp->rx_std_buffers[src_idx];
3602 break;
3603
3604 case RXD_OPAQUE_RING_JUMBO:
3605 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3606 dest_desc = &tp->rx_jumbo[dest_idx];
3607 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3608 src_desc = &tp->rx_jumbo[src_idx];
3609 src_map = &tp->rx_jumbo_buffers[src_idx];
3610 break;
3611
3612 default:
3613 return;
3614 }
3615
3616 dest_map->skb = src_map->skb;
3617 pci_unmap_addr_set(dest_map, mapping,
3618 pci_unmap_addr(src_map, mapping));
3619 dest_desc->addr_hi = src_desc->addr_hi;
3620 dest_desc->addr_lo = src_desc->addr_lo;
3621
3622 src_map->skb = NULL;
3623 }
3624
3625 #if TG3_VLAN_TAG_USED
3626 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3627 {
3628 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3629 }
3630 #endif
3631
3632 /* The RX ring scheme is composed of multiple rings which post fresh
3633 * buffers to the chip, and one special ring the chip uses to report
3634 * status back to the host.
3635 *
3636 * The special ring reports the status of received packets to the
3637 * host. The chip does not write into the original descriptor the
3638 * RX buffer was obtained from. The chip simply takes the original
3639 * descriptor as provided by the host, updates the status and length
3640 * field, then writes this into the next status ring entry.
3641 *
3642 * Each ring the host uses to post buffers to the chip is described
3643 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
3644 * it is first placed into the on-chip ram. When the packet's length
3645 * is known, it walks down the TG3_BDINFO entries to select the ring.
3646 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3647 * which is within the range of the new packet's length is chosen.
3648 *
3649 * The "separate ring for rx status" scheme may sound queer, but it makes
3650 * sense from a cache coherency perspective. If only the host writes
3651 * to the buffer post rings, and only the chip writes to the rx status
3652 * rings, then cache lines never move beyond shared-modified state.
3653 * If both the host and chip were to write into the same ring, cache line
3654 * eviction could occur since both entities want it in an exclusive state.
3655 */
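/* To make the MAXLEN walk above concrete (the sizes here are
 * illustrative, not the exact values this driver programs): with a
 * standard ring accepting frames up to roughly 1.5K and a jumbo ring
 * up to roughly 9K, a 5000-byte packet skips the standard TG3_BDINFO
 * and lands on the jumbo ring, the first entry whose MAXLEN covers it.
 */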
3656 static int tg3_rx(struct tg3 *tp, int budget)
3657 {
3658 u32 work_mask, rx_std_posted = 0;
3659 u32 sw_idx = tp->rx_rcb_ptr;
3660 u16 hw_idx;
3661 int received;
3662
3663 hw_idx = tp->hw_status->idx[0].rx_producer;
3664 /*
3665 * We need to order the read of hw_idx and the read of
3666 * the opaque cookie.
3667 */
3668 rmb();
3669 work_mask = 0;
3670 received = 0;
3671 while (sw_idx != hw_idx && budget > 0) {
3672 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3673 unsigned int len;
3674 struct sk_buff *skb;
3675 dma_addr_t dma_addr;
3676 u32 opaque_key, desc_idx, *post_ptr;
3677
3678 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3679 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3680 if (opaque_key == RXD_OPAQUE_RING_STD) {
3681 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3682 mapping);
3683 skb = tp->rx_std_buffers[desc_idx].skb;
3684 post_ptr = &tp->rx_std_ptr;
3685 rx_std_posted++;
3686 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3687 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3688 mapping);
3689 skb = tp->rx_jumbo_buffers[desc_idx].skb;
3690 post_ptr = &tp->rx_jumbo_ptr;
3691 } else {
3693 goto next_pkt_nopost;
3694 }
3695
3696 work_mask |= opaque_key;
3697
3698 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3699 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3700 drop_it:
3701 tg3_recycle_rx(tp, opaque_key,
3702 desc_idx, *post_ptr);
3703 drop_it_no_recycle:
3704 /* Other statistics kept track of by card. */
3705 tp->net_stats.rx_dropped++;
3706 goto next_pkt;
3707 }
3708
3709 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3710
3711 if (len > RX_COPY_THRESHOLD
3712 && tp->rx_offset == 2
3713 /* rx_offset != 2 iff this is a 5701 card running
3714 * in PCI-X mode [see tg3_get_invariants()] */
3715 ) {
3716 int skb_size;
3717
3718 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3719 desc_idx, *post_ptr);
3720 if (skb_size < 0)
3721 goto drop_it;
3722
3723 pci_unmap_single(tp->pdev, dma_addr,
3724 skb_size - tp->rx_offset,
3725 PCI_DMA_FROMDEVICE);
3726
3727 skb_put(skb, len);
3728 } else {
3729 struct sk_buff *copy_skb;
3730
3731 tg3_recycle_rx(tp, opaque_key,
3732 desc_idx, *post_ptr);
3733
3734 copy_skb = netdev_alloc_skb(tp->dev, len + 2);
3735 if (copy_skb == NULL)
3736 goto drop_it_no_recycle;
3737
3738 skb_reserve(copy_skb, 2);
3739 skb_put(copy_skb, len);
3740 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3741 skb_copy_from_linear_data(skb, copy_skb->data, len);
3742 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3743
3744 /* We'll reuse the original ring buffer. */
3745 skb = copy_skb;
3746 }
3747
3748 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3749 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3750 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3751 >> RXD_TCPCSUM_SHIFT) == 0xffff))
3752 skb->ip_summed = CHECKSUM_UNNECESSARY;
3753 else
3754 skb->ip_summed = CHECKSUM_NONE;
3755
3756 skb->protocol = eth_type_trans(skb, tp->dev);
3757 #if TG3_VLAN_TAG_USED
3758 if (tp->vlgrp != NULL &&
3759 desc->type_flags & RXD_FLAG_VLAN) {
3760 tg3_vlan_rx(tp, skb,
3761 desc->err_vlan & RXD_VLAN_MASK);
3762 } else
3763 #endif
3764 netif_receive_skb(skb);
3765
3766 tp->dev->last_rx = jiffies;
3767 received++;
3768 budget--;
3769
3770 next_pkt:
3771 (*post_ptr)++;
3772
3773 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
3774 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
3775
3776 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
3777 TG3_64BIT_REG_LOW, idx);
3778 work_mask &= ~RXD_OPAQUE_RING_STD;
3779 rx_std_posted = 0;
3780 }
3781 next_pkt_nopost:
3782 sw_idx++;
3783 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
3784
3785 /* Refresh hw_idx to see if there is new work */
3786 if (sw_idx == hw_idx) {
3787 hw_idx = tp->hw_status->idx[0].rx_producer;
3788 rmb();
3789 }
3790 }
3791
3792 /* ACK the status ring. */
3793 tp->rx_rcb_ptr = sw_idx;
3794 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3795
3796 /* Refill RX ring(s). */
3797 if (work_mask & RXD_OPAQUE_RING_STD) {
3798 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3799 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3800 sw_idx);
3801 }
3802 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3803 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3804 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3805 sw_idx);
3806 }
3807 mmiowb();
3808
3809 return received;
3810 }
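/* work_mask records which producer rings we consumed buffers from,
 * so the refill step above only rings the mailbox(es) that actually
 * need new buffers.  The interim MAILBOX_RCV_STD_PROD_IDX write
 * inside the loop keeps the standard ring from running dry when a
 * burst of more than rx_std_max_post packets arrives in one poll.
 */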
3811
3812 static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
3813 {
3814 struct tg3_hw_status *sblk = tp->hw_status;
3815
3816 /* handle link change and other phy events */
3817 if (!(tp->tg3_flags &
3818 (TG3_FLAG_USE_LINKCHG_REG |
3819 TG3_FLAG_POLL_SERDES))) {
3820 if (sblk->status & SD_STATUS_LINK_CHG) {
3821 sblk->status = SD_STATUS_UPDATED |
3822 (sblk->status & ~SD_STATUS_LINK_CHG);
3823 spin_lock(&tp->lock);
3824 tg3_setup_phy(tp, 0);
3825 spin_unlock(&tp->lock);
3826 }
3827 }
3828
3829 /* run TX completion thread */
3830 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3831 tg3_tx(tp);
3832 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
3833 return work_done;
3834 }
3835
3836 /* run RX thread, within the bounds set by NAPI.
3837 * All RX "locking" is done by ensuring outside
3838 * code synchronizes with tg3->napi.poll()
3839 */
3840 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
3841 work_done += tg3_rx(tp, budget - work_done);
3842
3843 return work_done;
3844 }
3845
3846 static int tg3_poll(struct napi_struct *napi, int budget)
3847 {
3848 struct tg3 *tp = container_of(napi, struct tg3, napi);
3849 int work_done = 0;
3850 struct tg3_hw_status *sblk = tp->hw_status;
3851
3852 while (1) {
3853 work_done = tg3_poll_work(tp, work_done, budget);
3854
3855 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
3856 goto tx_recovery;
3857
3858 if (unlikely(work_done >= budget))
3859 break;
3860
3861 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3862 /* tp->last_tag is used in tg3_restart_ints() below
3863 * to tell the hw how much work has been processed,
3864 * so we must read it before checking for more work.
3865 */
3866 tp->last_tag = sblk->status_tag;
3867 rmb();
3868 } else
3869 sblk->status &= ~SD_STATUS_UPDATED;
3870
3871 if (likely(!tg3_has_work(tp))) {
3872 netif_rx_complete(tp->dev, napi);
3873 tg3_restart_ints(tp);
3874 break;
3875 }
3876 }
3877
3878 return work_done;
3879
3880 tx_recovery:
3881 /* work_done is guaranteed to be less than budget. */
3882 netif_rx_complete(tp->dev, napi);
3883 schedule_work(&tp->reset_task);
3884 return work_done;
3885 }
3886
3887 static void tg3_irq_quiesce(struct tg3 *tp)
3888 {
3889 BUG_ON(tp->irq_sync);
3890
3891 tp->irq_sync = 1;
3892 smp_mb();
3893
3894 synchronize_irq(tp->pdev->irq);
3895 }
3896
3897 static inline int tg3_irq_sync(struct tg3 *tp)
3898 {
3899 return tp->irq_sync;
3900 }
3901
3902 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3903 * If irq_sync is non-zero, then the IRQ handler must be synchronized
3904 * with as well. Most of the time, this is not necessary except when
3905 * shutting down the device.
3906 */
3907 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3908 {
3909 spin_lock_bh(&tp->lock);
3910 if (irq_sync)
3911 tg3_irq_quiesce(tp);
3912 }
3913
3914 static inline void tg3_full_unlock(struct tg3 *tp)
3915 {
3916 spin_unlock_bh(&tp->lock);
3917 }
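
/* Usage sketch (illustrative only, mirroring tg3_change_mtu() below):
 * pass irq_sync != 0 when the hardware is about to be torn down so the
 * IRQ handler is quiesced before reconfiguration starts.
 *
 *	tg3_full_lock(tp, 1);			// also syncs the IRQ handler
 *	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 *	err = tg3_restart_hw(tp, 0);		// may drop/retake tp->lock
 *	tg3_full_unlock(tp);
 */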
3918
3919 /* One-shot MSI handler - Chip automatically disables interrupt
3920 * after sending MSI so driver doesn't have to do it.
3921 */
3922 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
3923 {
3924 struct net_device *dev = dev_id;
3925 struct tg3 *tp = netdev_priv(dev);
3926
3927 prefetch(tp->hw_status);
3928 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3929
3930 if (likely(!tg3_irq_sync(tp)))
3931 netif_rx_schedule(dev, &tp->napi);
3932
3933 return IRQ_HANDLED;
3934 }
3935
3936 /* MSI ISR - No need to check for interrupt sharing and no need to
3937 * flush status block and interrupt mailbox. PCI ordering rules
3938 * guarantee that MSI will arrive after the status block.
3939 */
3940 static irqreturn_t tg3_msi(int irq, void *dev_id)
3941 {
3942 struct net_device *dev = dev_id;
3943 struct tg3 *tp = netdev_priv(dev);
3944
3945 prefetch(tp->hw_status);
3946 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3947 /*
3948 * Writing any value to intr-mbox-0 clears PCI INTA# and
3949 * chip-internal interrupt pending events.
3950 * Writing non-zero to intr-mbox-0 additionally tells the
3951 * NIC to stop sending us irqs, engaging "in-intr-handler"
3952 * event coalescing.
3953 */
3954 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3955 if (likely(!tg3_irq_sync(tp)))
3956 netif_rx_schedule(dev, &tp->napi);
3957
3958 return IRQ_RETVAL(1);
3959 }
3960
3961 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
3962 {
3963 struct net_device *dev = dev_id;
3964 struct tg3 *tp = netdev_priv(dev);
3965 struct tg3_hw_status *sblk = tp->hw_status;
3966 unsigned int handled = 1;
3967
3968 /* In INTx mode, it is possible for the interrupt to arrive at
3969 * the CPU before the status block posted by the chip is visible.
3970 * Reading the PCI State register will confirm whether the
3971 * interrupt is ours and will flush the status block.
3972 */
3973 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
3974 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
3975 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3976 handled = 0;
3977 goto out;
3978 }
3979 }
3980
3981 /*
3982 * Writing any value to intr-mbox-0 clears PCI INTA# and
3983 * chip-internal interrupt pending events.
3984 * Writing non-zero to intr-mbox-0 additionally tells the
3985 * NIC to stop sending us irqs, engaging "in-intr-handler"
3986 * event coalescing.
3987 *
3988 * Flush the mailbox to de-assert the IRQ immediately to prevent
3989 * spurious interrupts. The flush impacts performance but
3990 * excessive spurious interrupts can be worse in some cases.
3991 */
3992 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3993 if (tg3_irq_sync(tp))
3994 goto out;
3995 sblk->status &= ~SD_STATUS_UPDATED;
3996 if (likely(tg3_has_work(tp))) {
3997 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3998 netif_rx_schedule(dev, &tp->napi);
3999 } else {
4000 /* No work, shared interrupt perhaps?  Re-enable
4001 * interrupts, and flush that PCI write
4002 */
4003 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4004 0x00000000);
4005 }
4006 out:
4007 return IRQ_RETVAL(handled);
4008 }
4009
4010 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
4011 {
4012 struct net_device *dev = dev_id;
4013 struct tg3 *tp = netdev_priv(dev);
4014 struct tg3_hw_status *sblk = tp->hw_status;
4015 unsigned int handled = 1;
4016
4017 /* In INTx mode, it is possible for the interrupt to arrive at
4018 * the CPU before the status block posted by the chip is visible.
4019 * Reading the PCI State register will confirm whether the
4020 * interrupt is ours and will flush the status block.
4021 */
4022 if (unlikely(sblk->status_tag == tp->last_tag)) {
4023 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4024 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4025 handled = 0;
4026 goto out;
4027 }
4028 }
4029
4030 /*
4031 * Writing any value to intr-mbox-0 clears PCI INTA# and
4032 * chip-internal interrupt pending events.
4033 * Writing non-zero to intr-mbox-0 additionally tells the
4034 * NIC to stop sending us irqs, engaging "in-intr-handler"
4035 * event coalescing.
4036 *
4037 * Flush the mailbox to de-assert the IRQ immediately to prevent
4038 * spurious interrupts. The flush impacts performance but
4039 * excessive spurious interrupts can be worse in some cases.
4040 */
4041 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4042 if (tg3_irq_sync(tp))
4043 goto out;
4044 if (netif_rx_schedule_prep(dev, &tp->napi)) {
4045 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4046 /* Update last_tag to mark that this status has been
4047 * seen.  Because the interrupt may be shared, we may be
4048 * racing with tg3_poll(), so only update last_tag
4049 * if tg3_poll() is not scheduled.
4050 */
4051 tp->last_tag = sblk->status_tag;
4052 __netif_rx_schedule(dev, &tp->napi);
4053 }
4054 out:
4055 return IRQ_RETVAL(handled);
4056 }
4057
4058 /* ISR for interrupt test */
4059 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
4060 {
4061 struct net_device *dev = dev_id;
4062 struct tg3 *tp = netdev_priv(dev);
4063 struct tg3_hw_status *sblk = tp->hw_status;
4064
4065 if ((sblk->status & SD_STATUS_UPDATED) ||
4066 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4067 tg3_disable_ints(tp);
4068 return IRQ_RETVAL(1);
4069 }
4070 return IRQ_RETVAL(0);
4071 }
4072
4073 static int tg3_init_hw(struct tg3 *, int);
4074 static int tg3_halt(struct tg3 *, int, int);
4075
4076 /* Restart hardware after configuration changes, self-test, etc.
4077 * Invoked with tp->lock held.
4078 */
4079 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
4080 __releases(tp->lock)
4081 __acquires(tp->lock)
4082 {
4083 int err;
4084
4085 err = tg3_init_hw(tp, reset_phy);
4086 if (err) {
4087 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4088 "aborting.\n", tp->dev->name);
4089 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4090 tg3_full_unlock(tp);
4091 del_timer_sync(&tp->timer);
4092 tp->irq_sync = 0;
4093 napi_enable(&tp->napi);
4094 dev_close(tp->dev);
4095 tg3_full_lock(tp, 0);
4096 }
4097 return err;
4098 }
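
/* Note: the error path above must drop tp->lock around dev_close(),
 * which may sleep (hence the __releases/__acquires annotations), so
 * callers cannot assume the lock was held continuously across a
 * failed tg3_restart_hw().
 */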
4099
4100 #ifdef CONFIG_NET_POLL_CONTROLLER
4101 static void tg3_poll_controller(struct net_device *dev)
4102 {
4103 struct tg3 *tp = netdev_priv(dev);
4104
4105 tg3_interrupt(tp->pdev->irq, dev);
4106 }
4107 #endif
4108
4109 static void tg3_reset_task(struct work_struct *work)
4110 {
4111 struct tg3 *tp = container_of(work, struct tg3, reset_task);
4112 unsigned int restart_timer;
4113
4114 tg3_full_lock(tp, 0);
4115
4116 if (!netif_running(tp->dev)) {
4117 tg3_full_unlock(tp);
4118 return;
4119 }
4120
4121 tg3_full_unlock(tp);
4122
4123 tg3_netif_stop(tp);
4124
4125 tg3_full_lock(tp, 1);
4126
4127 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
4128 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
4129
4130 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
4131 tp->write32_tx_mbox = tg3_write32_tx_mbox;
4132 tp->write32_rx_mbox = tg3_write_flush_reg32;
4133 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
4134 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
4135 }
4136
4137 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
4138 if (tg3_init_hw(tp, 1))
4139 goto out;
4140
4141 tg3_netif_start(tp);
4142
4143 if (restart_timer)
4144 mod_timer(&tp->timer, jiffies + 1);
4145
4146 out:
4147 tg3_full_unlock(tp);
4148 }
4149
4150 static void tg3_dump_short_state(struct tg3 *tp)
4151 {
4152 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
4153 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
4154 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
4155 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
4156 }
4157
4158 static void tg3_tx_timeout(struct net_device *dev)
4159 {
4160 struct tg3 *tp = netdev_priv(dev);
4161
4162 if (netif_msg_tx_err(tp)) {
4163 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
4164 dev->name);
4165 tg3_dump_short_state(tp);
4166 }
4167
4168 schedule_work(&tp->reset_task);
4169 }
4170
4171 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4172 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4173 {
4174 u32 base = (u32) mapping & 0xffffffff;
4175
4176 return ((base > 0xffffdcc0) &&
4177 (base + len + 8 < base));
4178 }
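
/* Worked example (a sketch, not driver logic): for mapping == 0xfffff000
 * and len == 0x2000, base is 0xfffff000 > 0xffffdcc0, and base + len + 8
 * wraps to 0x00001008 in 32-bit arithmetic, which is < base, so the
 * buffer is flagged as crossing a 4GB boundary and must take the
 * workaround path.
 */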
4179
4180 /* Test for DMA addresses > 40-bit */
4181 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4182 int len)
4183 {
4184 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4185 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
4186 return (((u64) mapping + len) > DMA_40BIT_MASK);
4187 return 0;
4188 #else
4189 return 0;
4190 #endif
4191 }
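
/* Worked example (sketch, hypothetical addresses): with the 40-bit DMA
 * bug flag set, mapping == 0xfffffff000 and len == 0x2000 give
 * mapping + len == 0x10000001000, which exceeds DMA_40BIT_MASK
 * (0xffffffffff), so the buffer would be rebuilt through
 * tigon3_dma_hwbug_workaround() below.
 */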
4192
4193 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4194
4195 /* Workaround 4GB and 40-bit hardware DMA bugs. */
4196 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
4197 u32 last_plus_one, u32 *start,
4198 u32 base_flags, u32 mss)
4199 {
4200 struct sk_buff *new_skb;
4201 dma_addr_t new_addr = 0;
4202 u32 entry = *start;
4203 int i, ret = 0;
4204
4205 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
4206 new_skb = skb_copy(skb, GFP_ATOMIC);
4207 } else {
4208 int more_headroom = 4 - ((unsigned long)skb->data & 3);
4209
4210 new_skb = skb_copy_expand(skb,
4211 skb_headroom(skb) + more_headroom,
4212 skb_tailroom(skb), GFP_ATOMIC);
4213 }
4214
4215 if (!new_skb) {
4216 ret = -1;
4217 } else {
4218 /* New SKB is guaranteed to be linear. */
4219 entry = *start;
4220 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
4221 PCI_DMA_TODEVICE);
4222 /* Make sure new skb does not cross any 4G boundaries.
4223 * Drop the packet if it does.
4224 */
4225 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
4226 ret = -1;
4227 dev_kfree_skb(new_skb);
4228 new_skb = NULL;
4229 } else {
4230 tg3_set_txd(tp, entry, new_addr, new_skb->len,
4231 base_flags, 1 | (mss << 1));
4232 *start = NEXT_TX(entry);
4233 }
4234 }
4235
4236 /* Now clean up the sw ring entries. */
4237 i = 0;
4238 while (entry != last_plus_one) {
4239 int len;
4240
4241 if (i == 0)
4242 len = skb_headlen(skb);
4243 else
4244 len = skb_shinfo(skb)->frags[i-1].size;
4245 pci_unmap_single(tp->pdev,
4246 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
4247 len, PCI_DMA_TODEVICE);
4248 if (i == 0) {
4249 tp->tx_buffers[entry].skb = new_skb;
4250 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
4251 } else {
4252 tp->tx_buffers[entry].skb = NULL;
4253 }
4254 entry = NEXT_TX(entry);
4255 i++;
4256 }
4257
4258 dev_kfree_skb(skb);
4259
4260 return ret;
4261 }
4262
4263 static void tg3_set_txd(struct tg3 *tp, int entry,
4264 dma_addr_t mapping, int len, u32 flags,
4265 u32 mss_and_is_end)
4266 {
4267 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4268 int is_end = (mss_and_is_end & 0x1);
4269 u32 mss = (mss_and_is_end >> 1);
4270 u32 vlan_tag = 0;
4271
4272 if (is_end)
4273 flags |= TXD_FLAG_END;
4274 if (flags & TXD_FLAG_VLAN) {
4275 vlan_tag = flags >> 16;
4276 flags &= 0xffff;
4277 }
4278 vlan_tag |= (mss << TXD_MSS_SHIFT);
4279
4280 txd->addr_hi = ((u64) mapping >> 32);
4281 txd->addr_lo = ((u64) mapping & 0xffffffff);
4282 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4283 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4284 }
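
/* Encoding sketch for the mss_and_is_end argument above, matching the
 * (i == last) | (mss << 1) expressions used by the callers:
 *
 *	u32 packed = (mss << 1) | (is_last ? 1 : 0);
 *	// tg3_set_txd() then recovers:
 *	//	is_end = packed & 0x1;
 *	//	mss    = packed >> 1;
 */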
4285
4286 /* hard_start_xmit for devices that don't have any bugs and
4287 * support TG3_FLG2_HW_TSO_2 only.
4288 */
4289 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4290 {
4291 struct tg3 *tp = netdev_priv(dev);
4292 dma_addr_t mapping;
4293 u32 len, entry, base_flags, mss;
4294
4295 len = skb_headlen(skb);
4296
4297 /* We are running in BH disabled context with netif_tx_lock
4298 * and TX reclaim runs via tp->napi.poll inside of a software
4299 * interrupt. Furthermore, IRQ processing runs lockless so we have
4300 * no IRQ context deadlocks to worry about either. Rejoice!
4301 */
4302 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4303 if (!netif_queue_stopped(dev)) {
4304 netif_stop_queue(dev);
4305
4306 /* This is a hard error, log it. */
4307 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4308 "queue awake!\n", dev->name);
4309 }
4310 return NETDEV_TX_BUSY;
4311 }
4312
4313 entry = tp->tx_prod;
4314 base_flags = 0;
4315 mss = 0;
4316 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4317 int tcp_opt_len, ip_tcp_len;
4318
4319 if (skb_header_cloned(skb) &&
4320 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4321 dev_kfree_skb(skb);
4322 goto out_unlock;
4323 }
4324
4325 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4326 mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4327 else {
4328 struct iphdr *iph = ip_hdr(skb);
4329
4330 tcp_opt_len = tcp_optlen(skb);
4331 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4332
4333 iph->check = 0;
4334 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4335 mss |= (ip_tcp_len + tcp_opt_len) << 9;
4336 }
4337
4338 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4339 TXD_FLAG_CPU_POST_DMA);
4340
4341 tcp_hdr(skb)->check = 0;
4342 } else if (skb->ip_summed == CHECKSUM_PARTIAL)
4345 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4346 #if TG3_VLAN_TAG_USED
4347 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4348 base_flags |= (TXD_FLAG_VLAN |
4349 (vlan_tx_tag_get(skb) << 16));
4350 #endif
4351
4352 /* Queue skb data, a.k.a. the main skb fragment. */
4353 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4354
4355 tp->tx_buffers[entry].skb = skb;
4356 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4357
4358 tg3_set_txd(tp, entry, mapping, len, base_flags,
4359 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4360
4361 entry = NEXT_TX(entry);
4362
4363 /* Now loop through additional data fragments, and queue them. */
4364 if (skb_shinfo(skb)->nr_frags > 0) {
4365 unsigned int i, last;
4366
4367 last = skb_shinfo(skb)->nr_frags - 1;
4368 for (i = 0; i <= last; i++) {
4369 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4370
4371 len = frag->size;
4372 mapping = pci_map_page(tp->pdev,
4373 frag->page,
4374 frag->page_offset,
4375 len, PCI_DMA_TODEVICE);
4376
4377 tp->tx_buffers[entry].skb = NULL;
4378 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4379
4380 tg3_set_txd(tp, entry, mapping, len,
4381 base_flags, (i == last) | (mss << 1));
4382
4383 entry = NEXT_TX(entry);
4384 }
4385 }
4386
4387 /* Packets are ready, update Tx producer idx local and on card. */
4388 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4389
4390 tp->tx_prod = entry;
4391 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4392 netif_stop_queue(dev);
4393 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4394 netif_wake_queue(tp->dev);
4395 }
4396
4397 out_unlock:
4398 mmiowb();
4399
4400 dev->trans_start = jiffies;
4401
4402 return NETDEV_TX_OK;
4403 }
4404
4405 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4406
4407 /* Use GSO to workaround a rare TSO bug that may be triggered when the
4408 * TSO header is greater than 80 bytes.
4409 */
4410 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4411 {
4412 struct sk_buff *segs, *nskb;
4413
4414 /* Estimate the number of fragments in the worst case */
4415 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
4416 netif_stop_queue(tp->dev);
4417 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4418 return NETDEV_TX_BUSY;
4419
4420 netif_wake_queue(tp->dev);
4421 }
4422
4423 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4424 if (IS_ERR(segs))
4425 goto tg3_tso_bug_end;
4426
4427 do {
4428 nskb = segs;
4429 segs = segs->next;
4430 nskb->next = NULL;
4431 tg3_start_xmit_dma_bug(nskb, tp->dev);
4432 } while (segs);
4433
4434 tg3_tso_bug_end:
4435 dev_kfree_skb(skb);
4436
4437 return NETDEV_TX_OK;
4438 }
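
/* Note on the gso_segs * 3 estimate above: each segment produced by
 * skb_gso_segment() is conservatively assumed to need up to three TX
 * descriptors (e.g. a linear header area plus a couple of page
 * fragments).  For gso_segs == 10 the queue must therefore hold more
 * than 30 free entries before segmentation is attempted; this is an
 * upper-bound heuristic, not an exact descriptor count.
 */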
4439
4440 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4441 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4442 */
4443 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4444 {
4445 struct tg3 *tp = netdev_priv(dev);
4446 dma_addr_t mapping;
4447 u32 len, entry, base_flags, mss;
4448 int would_hit_hwbug;
4449
4450 len = skb_headlen(skb);
4451
4452 /* We are running in BH disabled context with netif_tx_lock
4453 * and TX reclaim runs via tp->napi.poll inside of a software
4454 * interrupt. Furthermore, IRQ processing runs lockless so we have
4455 * no IRQ context deadlocks to worry about either. Rejoice!
4456 */
4457 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4458 if (!netif_queue_stopped(dev)) {
4459 netif_stop_queue(dev);
4460
4461 /* This is a hard error, log it. */
4462 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4463 "queue awake!\n", dev->name);
4464 }
4465 return NETDEV_TX_BUSY;
4466 }
4467
4468 entry = tp->tx_prod;
4469 base_flags = 0;
4470 if (skb->ip_summed == CHECKSUM_PARTIAL)
4471 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4472 mss = 0;
4473 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4474 struct iphdr *iph;
4475 int tcp_opt_len, ip_tcp_len, hdr_len;
4476
4477 if (skb_header_cloned(skb) &&
4478 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4479 dev_kfree_skb(skb);
4480 goto out_unlock;
4481 }
4482
4483 tcp_opt_len = tcp_optlen(skb);
4484 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4485
4486 hdr_len = ip_tcp_len + tcp_opt_len;
4487 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
4488 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
4489 return tg3_tso_bug(tp, skb);
4490
4491 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4492 TXD_FLAG_CPU_POST_DMA);
4493
4494 iph = ip_hdr(skb);
4495 iph->check = 0;
4496 iph->tot_len = htons(mss + hdr_len);
4497 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
4498 tcp_hdr(skb)->check = 0;
4499 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
4500 } else
4501 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4502 iph->daddr, 0,
4503 IPPROTO_TCP,
4504 0);
4505
4506 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4507 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
4508 if (tcp_opt_len || iph->ihl > 5) {
4509 int tsflags;
4510
4511 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4512 mss |= (tsflags << 11);
4513 }
4514 } else {
4515 if (tcp_opt_len || iph->ihl > 5) {
4516 int tsflags;
4517
4518 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4519 base_flags |= tsflags << 12;
4520 }
4521 }
4522 }
4523 #if TG3_VLAN_TAG_USED
4524 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4525 base_flags |= (TXD_FLAG_VLAN |
4526 (vlan_tx_tag_get(skb) << 16));
4527 #endif
4528
4529 /* Queue skb data, a.k.a. the main skb fragment. */
4530 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4531
4532 tp->tx_buffers[entry].skb = skb;
4533 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4534
4535 would_hit_hwbug = 0;
4536
4537 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
4538 would_hit_hwbug = 1;
4539 else if (tg3_4g_overflow_test(mapping, len))
4540 would_hit_hwbug = 1;
4541
4542 tg3_set_txd(tp, entry, mapping, len, base_flags,
4543 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4544
4545 entry = NEXT_TX(entry);
4546
4547 /* Now loop through additional data fragments, and queue them. */
4548 if (skb_shinfo(skb)->nr_frags > 0) {
4549 unsigned int i, last;
4550
4551 last = skb_shinfo(skb)->nr_frags - 1;
4552 for (i = 0; i <= last; i++) {
4553 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4554
4555 len = frag->size;
4556 mapping = pci_map_page(tp->pdev,
4557 frag->page,
4558 frag->page_offset,
4559 len, PCI_DMA_TODEVICE);
4560
4561 tp->tx_buffers[entry].skb = NULL;
4562 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4563
4564 if (tg3_4g_overflow_test(mapping, len))
4565 would_hit_hwbug = 1;
4566
4567 if (tg3_40bit_overflow_test(tp, mapping, len))
4568 would_hit_hwbug = 1;
4569
4570 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4571 tg3_set_txd(tp, entry, mapping, len,
4572 base_flags, (i == last)|(mss << 1));
4573 else
4574 tg3_set_txd(tp, entry, mapping, len,
4575 base_flags, (i == last));
4576
4577 entry = NEXT_TX(entry);
4578 }
4579 }
4580
4581 if (would_hit_hwbug) {
4582 u32 last_plus_one = entry;
4583 u32 start;
4584
4585 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4586 start &= (TG3_TX_RING_SIZE - 1);
4587
4588 /* If the workaround fails due to memory/mapping
4589 * failure, silently drop this packet.
4590 */
4591 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
4592 &start, base_flags, mss))
4593 goto out_unlock;
4594
4595 entry = start;
4596 }
4597
4598 /* Packets are ready, update Tx producer idx local and on card. */
4599 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4600
4601 tp->tx_prod = entry;
4602 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4603 netif_stop_queue(dev);
4604 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4605 netif_wake_queue(tp->dev);
4606 }
4607
4608 out_unlock:
4609 mmiowb();
4610
4611 dev->trans_start = jiffies;
4612
4613 return NETDEV_TX_OK;
4614 }
4615
4616 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4617 int new_mtu)
4618 {
4619 dev->mtu = new_mtu;
4620
4621 if (new_mtu > ETH_DATA_LEN) {
4622 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4623 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4624 ethtool_op_set_tso(dev, 0);
4625 } else
4627 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4628 } else {
4629 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4630 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4631 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4632 }
4633 }
4634
4635 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4636 {
4637 struct tg3 *tp = netdev_priv(dev);
4638 int err;
4639
4640 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4641 return -EINVAL;
4642
4643 if (!netif_running(dev)) {
4644 /* We'll just catch it later when the
4645 * device is brought up.
4646 */
4647 tg3_set_mtu(dev, tp, new_mtu);
4648 return 0;
4649 }
4650
4651 tg3_netif_stop(tp);
4652
4653 tg3_full_lock(tp, 1);
4654
4655 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4656
4657 tg3_set_mtu(dev, tp, new_mtu);
4658
4659 err = tg3_restart_hw(tp, 0);
4660
4661 if (!err)
4662 tg3_netif_start(tp);
4663
4664 tg3_full_unlock(tp);
4665
4666 return err;
4667 }
4668
4669 /* Free up pending packets in all rx/tx rings.
4670 *
4671 * The chip has been shut down and the driver detached from
4672 * the networking stack, so no interrupts or new tx packets will
4673 * end up in the driver. tp->{tx,}lock is not held and we are not
4674 * in an interrupt context and thus may sleep.
4675 */
4676 static void tg3_free_rings(struct tg3 *tp)
4677 {
4678 struct ring_info *rxp;
4679 int i;
4680
4681 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4682 rxp = &tp->rx_std_buffers[i];
4683
4684 if (rxp->skb == NULL)
4685 continue;
4686 pci_unmap_single(tp->pdev,
4687 pci_unmap_addr(rxp, mapping),
4688 tp->rx_pkt_buf_sz - tp->rx_offset,
4689 PCI_DMA_FROMDEVICE);
4690 dev_kfree_skb_any(rxp->skb);
4691 rxp->skb = NULL;
4692 }
4693
4694 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4695 rxp = &tp->rx_jumbo_buffers[i];
4696
4697 if (rxp->skb == NULL)
4698 continue;
4699 pci_unmap_single(tp->pdev,
4700 pci_unmap_addr(rxp, mapping),
4701 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4702 PCI_DMA_FROMDEVICE);
4703 dev_kfree_skb_any(rxp->skb);
4704 rxp->skb = NULL;
4705 }
4706
4707 for (i = 0; i < TG3_TX_RING_SIZE; ) {
4708 struct tx_ring_info *txp;
4709 struct sk_buff *skb;
4710 int j;
4711
4712 txp = &tp->tx_buffers[i];
4713 skb = txp->skb;
4714
4715 if (skb == NULL) {
4716 i++;
4717 continue;
4718 }
4719
4720 pci_unmap_single(tp->pdev,
4721 pci_unmap_addr(txp, mapping),
4722 skb_headlen(skb),
4723 PCI_DMA_TODEVICE);
4724 txp->skb = NULL;
4725
4726 i++;
4727
4728 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4729 txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4730 pci_unmap_page(tp->pdev,
4731 pci_unmap_addr(txp, mapping),
4732 skb_shinfo(skb)->frags[j].size,
4733 PCI_DMA_TODEVICE);
4734 i++;
4735 }
4736
4737 dev_kfree_skb_any(skb);
4738 }
4739 }
4740
4741 /* Initialize tx/rx rings for packet processing.
4742 *
4743 * The chip has been shut down and the driver detached from
4744 * the networking stack, so no interrupts or new tx packets will
4745 * end up in the driver. tp->{tx,}lock are held and thus
4746 * we may not sleep.
4747 */
4748 static int tg3_init_rings(struct tg3 *tp)
4749 {
4750 u32 i;
4751
4752 /* Free up all the SKBs. */
4753 tg3_free_rings(tp);
4754
4755 /* Zero out all descriptors. */
4756 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4757 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4758 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4759 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4760
4761 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4762 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4763 (tp->dev->mtu > ETH_DATA_LEN))
4764 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4765
4766 /* Initialize invariants of the rings; we only set this
4767 * stuff once. This works because the card does not
4768 * write into the rx buffer posting rings.
4769 */
4770 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4771 struct tg3_rx_buffer_desc *rxd;
4772
4773 rxd = &tp->rx_std[i];
4774 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4775 << RXD_LEN_SHIFT;
4776 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4777 rxd->opaque = (RXD_OPAQUE_RING_STD |
4778 (i << RXD_OPAQUE_INDEX_SHIFT));
4779 }
4780
4781 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4782 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4783 struct tg3_rx_buffer_desc *rxd;
4784
4785 rxd = &tp->rx_jumbo[i];
4786 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4787 << RXD_LEN_SHIFT;
4788 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4789 RXD_FLAG_JUMBO;
4790 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4791 (i << RXD_OPAQUE_INDEX_SHIFT));
4792 }
4793 }
4794
4795 /* Now allocate fresh SKBs for each rx ring. */
4796 for (i = 0; i < tp->rx_pending; i++) {
4797 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
4798 printk(KERN_WARNING PFX
4799 "%s: Using a smaller RX standard ring, "
4800 "only %d out of %d buffers were allocated "
4801 "successfully.\n",
4802 tp->dev->name, i, tp->rx_pending);
4803 if (i == 0)
4804 return -ENOMEM;
4805 tp->rx_pending = i;
4806 break;
4807 }
4808 }
4809
4810 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4811 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4812 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4813 -1, i) < 0) {
4814 printk(KERN_WARNING PFX
4815 "%s: Using a smaller RX jumbo ring, "
4816 "only %d out of %d buffers were "
4817 "allocated successfully.\n",
4818 tp->dev->name, i, tp->rx_jumbo_pending);
4819 if (i == 0) {
4820 tg3_free_rings(tp);
4821 return -ENOMEM;
4822 }
4823 tp->rx_jumbo_pending = i;
4824 break;
4825 }
4826 }
4827 }
4828 return 0;
4829 }
4830
4831 /*
4832 * Must not be invoked with interrupt sources disabled and
4833 * the hardware shut down.
4834 */
4835 static void tg3_free_consistent(struct tg3 *tp)
4836 {
4837 kfree(tp->rx_std_buffers);
4838 tp->rx_std_buffers = NULL;
4839 if (tp->rx_std) {
4840 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4841 tp->rx_std, tp->rx_std_mapping);
4842 tp->rx_std = NULL;
4843 }
4844 if (tp->rx_jumbo) {
4845 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4846 tp->rx_jumbo, tp->rx_jumbo_mapping);
4847 tp->rx_jumbo = NULL;
4848 }
4849 if (tp->rx_rcb) {
4850 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4851 tp->rx_rcb, tp->rx_rcb_mapping);
4852 tp->rx_rcb = NULL;
4853 }
4854 if (tp->tx_ring) {
4855 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4856 tp->tx_ring, tp->tx_desc_mapping);
4857 tp->tx_ring = NULL;
4858 }
4859 if (tp->hw_status) {
4860 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4861 tp->hw_status, tp->status_mapping);
4862 tp->hw_status = NULL;
4863 }
4864 if (tp->hw_stats) {
4865 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4866 tp->hw_stats, tp->stats_mapping);
4867 tp->hw_stats = NULL;
4868 }
4869 }
4870
4871 /*
4872 * Must not be invoked with interrupt sources disabled and
4873 * the hardware shut down.  Can sleep.
4874 */
4875 static int tg3_alloc_consistent(struct tg3 *tp)
4876 {
4877 tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
4878 (TG3_RX_RING_SIZE +
4879 TG3_RX_JUMBO_RING_SIZE)) +
4880 (sizeof(struct tx_ring_info) *
4881 TG3_TX_RING_SIZE),
4882 GFP_KERNEL);
4883 if (!tp->rx_std_buffers)
4884 return -ENOMEM;
4885
4886 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4887 tp->tx_buffers = (struct tx_ring_info *)
4888 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4889
4890 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4891 &tp->rx_std_mapping);
4892 if (!tp->rx_std)
4893 goto err_out;
4894
4895 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4896 &tp->rx_jumbo_mapping);
4897
4898 if (!tp->rx_jumbo)
4899 goto err_out;
4900
4901 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4902 &tp->rx_rcb_mapping);
4903 if (!tp->rx_rcb)
4904 goto err_out;
4905
4906 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4907 &tp->tx_desc_mapping);
4908 if (!tp->tx_ring)
4909 goto err_out;
4910
4911 tp->hw_status = pci_alloc_consistent(tp->pdev,
4912 TG3_HW_STATUS_SIZE,
4913 &tp->status_mapping);
4914 if (!tp->hw_status)
4915 goto err_out;
4916
4917 tp->hw_stats = pci_alloc_consistent(tp->pdev,
4918 sizeof(struct tg3_hw_stats),
4919 &tp->stats_mapping);
4920 if (!tp->hw_stats)
4921 goto err_out;
4922
4923 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4924 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4925
4926 return 0;
4927
4928 err_out:
4929 tg3_free_consistent(tp);
4930 return -ENOMEM;
4931 }
4932
4933 #define MAX_WAIT_CNT 1000
4934
4935 /* To stop a block, clear the enable bit and poll until it
4936 * clears. tp->lock is held.
4937 */
4938 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4939 {
4940 unsigned int i;
4941 u32 val;
4942
4943 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4944 switch (ofs) {
4945 case RCVLSC_MODE:
4946 case DMAC_MODE:
4947 case MBFREE_MODE:
4948 case BUFMGR_MODE:
4949 case MEMARB_MODE:
4950 /* We can't enable/disable these bits of the
4951 * 5705/5750, so just report success.
4952 */
4953 return 0;
4954
4955 default:
4956 break;
4957 }
4958 }
4959
4960 val = tr32(ofs);
4961 val &= ~enable_bit;
4962 tw32_f(ofs, val);
4963
4964 for (i = 0; i < MAX_WAIT_CNT; i++) {
4965 udelay(100);
4966 val = tr32(ofs);
4967 if ((val & enable_bit) == 0)
4968 break;
4969 }
4970
4971 if (i == MAX_WAIT_CNT && !silent) {
4972 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4973 "ofs=%lx enable_bit=%x\n",
4974 ofs, enable_bit);
4975 return -ENODEV;
4976 }
4977
4978 return 0;
4979 }
4980
4981 /* tp->lock is held. */
4982 static int tg3_abort_hw(struct tg3 *tp, int silent)
4983 {
4984 int i, err;
4985
4986 tg3_disable_ints(tp);
4987
4988 tp->rx_mode &= ~RX_MODE_ENABLE;
4989 tw32_f(MAC_RX_MODE, tp->rx_mode);
4990 udelay(10);
4991
4992 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4993 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4994 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4995 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4996 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4997 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4998
4999 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
5000 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
5001 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
5002 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
5003 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
5004 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
5005 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
5006
5007 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
5008 tw32_f(MAC_MODE, tp->mac_mode);
5009 udelay(40);
5010
5011 tp->tx_mode &= ~TX_MODE_ENABLE;
5012 tw32_f(MAC_TX_MODE, tp->tx_mode);
5013
5014 for (i = 0; i < MAX_WAIT_CNT; i++) {
5015 udelay(100);
5016 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
5017 break;
5018 }
5019 if (i >= MAX_WAIT_CNT) {
5020 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
5021 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
5022 tp->dev->name, tr32(MAC_TX_MODE));
5023 err |= -ENODEV;
5024 }
5025
5026 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
5027 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
5028 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
5029
5030 tw32(FTQ_RESET, 0xffffffff);
5031 tw32(FTQ_RESET, 0x00000000);
5032
5033 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
5034 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
5035
5036 if (tp->hw_status)
5037 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5038 if (tp->hw_stats)
5039 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5040
5041 return err;
5042 }
5043
5044 /* tp->lock is held. */
5045 static int tg3_nvram_lock(struct tg3 *tp)
5046 {
5047 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5048 int i;
5049
5050 if (tp->nvram_lock_cnt == 0) {
5051 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5052 for (i = 0; i < 8000; i++) {
5053 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5054 break;
5055 udelay(20);
5056 }
5057 if (i == 8000) {
5058 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5059 return -ENODEV;
5060 }
5061 }
5062 tp->nvram_lock_cnt++;
5063 }
5064 return 0;
5065 }
5066
5067 /* tp->lock is held. */
5068 static void tg3_nvram_unlock(struct tg3 *tp)
5069 {
5070 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5071 if (tp->nvram_lock_cnt > 0)
5072 tp->nvram_lock_cnt--;
5073 if (tp->nvram_lock_cnt == 0)
5074 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5075 }
5076 }
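
/* Nesting sketch (illustrative): the lock/unlock pair above refcounts
 * via tp->nvram_lock_cnt, so only the outermost calls touch the
 * hardware arbitration register.
 *
 *	tg3_nvram_lock(tp);	// cnt 0 -> 1, acquires SWARB_GNT1
 *	tg3_nvram_lock(tp);	// cnt 1 -> 2, no hardware access
 *	tg3_nvram_unlock(tp);	// cnt 2 -> 1
 *	tg3_nvram_unlock(tp);	// cnt 1 -> 0, releases the arbitration
 */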
5077
5078 /* tp->lock is held. */
5079 static void tg3_enable_nvram_access(struct tg3 *tp)
5080 {
5081 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5082 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5083 u32 nvaccess = tr32(NVRAM_ACCESS);
5084
5085 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5086 }
5087 }
5088
5089 /* tp->lock is held. */
5090 static void tg3_disable_nvram_access(struct tg3 *tp)
5091 {
5092 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5093 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5094 u32 nvaccess = tr32(NVRAM_ACCESS);
5095
5096 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5097 }
5098 }
5099
5100 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
5101 {
5102 int i;
5103 u32 apedata;
5104
5105 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
5106 if (apedata != APE_SEG_SIG_MAGIC)
5107 return;
5108
5109 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
5110 if (apedata != APE_FW_STATUS_READY)
5111 return;
5112
5113 /* Wait for up to 1 millisecond for APE to service previous event. */
5114 for (i = 0; i < 10; i++) {
5115 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
5116 return;
5117
5118 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
5119
5120 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5121 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
5122 event | APE_EVENT_STATUS_EVENT_PENDING);
5123
5124 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
5125
5126 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5127 break;
5128
5129 udelay(100);
5130 }
5131
5132 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5133 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
5134 }
5135
5136 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5137 {
5138 u32 event;
5139 u32 apedata;
5140
5141 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5142 return;
5143
5144 switch (kind) {
5145 case RESET_KIND_INIT:
5146 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5147 APE_HOST_SEG_SIG_MAGIC);
5148 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5149 APE_HOST_SEG_LEN_MAGIC);
5150 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5151 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5152 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5153 APE_HOST_DRIVER_ID_MAGIC);
5154 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5155 APE_HOST_BEHAV_NO_PHYLOCK);
5156
5157 event = APE_EVENT_STATUS_STATE_START;
5158 break;
5159 case RESET_KIND_SHUTDOWN:
5160 event = APE_EVENT_STATUS_STATE_UNLOAD;
5161 break;
5162 case RESET_KIND_SUSPEND:
5163 event = APE_EVENT_STATUS_STATE_SUSPEND;
5164 break;
5165 default:
5166 return;
5167 }
5168
5169 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5170
5171 tg3_ape_send_event(tp, event);
5172 }
5173
5174 /* tp->lock is held. */
5175 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5176 {
5177 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5178 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5179
5180 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5181 switch (kind) {
5182 case RESET_KIND_INIT:
5183 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5184 DRV_STATE_START);
5185 break;
5186
5187 case RESET_KIND_SHUTDOWN:
5188 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5189 DRV_STATE_UNLOAD);
5190 break;
5191
5192 case RESET_KIND_SUSPEND:
5193 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5194 DRV_STATE_SUSPEND);
5195 break;
5196
5197 default:
5198 break;
5199 }
5200 }
5201
5202 if (kind == RESET_KIND_INIT ||
5203 kind == RESET_KIND_SUSPEND)
5204 tg3_ape_driver_state_change(tp, kind);
5205 }
5206
5207 /* tp->lock is held. */
5208 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5209 {
5210 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5211 switch (kind) {
5212 case RESET_KIND_INIT:
5213 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5214 DRV_STATE_START_DONE);
5215 break;
5216
5217 case RESET_KIND_SHUTDOWN:
5218 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5219 DRV_STATE_UNLOAD_DONE);
5220 break;
5221
5222 default:
5223 break;
5224 }
5225 }
5226
5227 if (kind == RESET_KIND_SHUTDOWN)
5228 tg3_ape_driver_state_change(tp, kind);
5229 }
5230
5231 /* tp->lock is held. */
5232 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5233 {
5234 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5235 switch (kind) {
5236 case RESET_KIND_INIT:
5237 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5238 DRV_STATE_START);
5239 break;
5240
5241 case RESET_KIND_SHUTDOWN:
5242 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5243 DRV_STATE_UNLOAD);
5244 break;
5245
5246 case RESET_KIND_SUSPEND:
5247 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5248 DRV_STATE_SUSPEND);
5249 break;
5250
5251 default:
5252 break;
5253 }
5254 }
5255 }
5256
5257 static int tg3_poll_fw(struct tg3 *tp)
5258 {
5259 int i;
5260 u32 val;
5261
5262 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5263 /* Wait up to 20ms for init done. */
5264 for (i = 0; i < 200; i++) {
5265 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5266 return 0;
5267 udelay(100);
5268 }
5269 return -ENODEV;
5270 }
5271
5272 /* Wait for firmware initialization to complete. */
5273 for (i = 0; i < 100000; i++) {
5274 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5275 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5276 break;
5277 udelay(10);
5278 }
5279
5280 /* Chip might not be fitted with firmware. Some Sun onboard
5281 * parts are configured like that. So don't signal the timeout
5282 * of the above loop as an error, but do report the lack of
5283 * running firmware once.
5284 */
5285 if (i >= 100000 &&
5286 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5287 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5288
5289 printk(KERN_INFO PFX "%s: No firmware running.\n",
5290 tp->dev->name);
5291 }
5292
5293 return 0;
5294 }
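
/* Timing note (derived from the loops above): the 5906 path polls
 * VCPU_STATUS 200 times with udelay(100), i.e. ~20ms, while the
 * firmware-mailbox path polls 100000 times with udelay(10), i.e. ~1s,
 * before deciding that no firmware is running.
 */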
5295
5296 /* Save PCI command register before chip reset */
5297 static void tg3_save_pci_state(struct tg3 *tp)
5298 {
5299 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
5300 }
5301
5302 /* Restore PCI state after chip reset */
5303 static void tg3_restore_pci_state(struct tg3 *tp)
5304 {
5305 u32 val;
5306
5307 /* Re-enable indirect register accesses. */
5308 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5309 tp->misc_host_ctrl);
5310
5311 /* Set MAX PCI retry to zero. */
5312 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5313 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5314 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5315 val |= PCISTATE_RETRY_SAME_DMA;
5316 /* Allow reads and writes to the APE register and memory space. */
5317 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5318 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5319 PCISTATE_ALLOW_APE_SHMEM_WR;
5320 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5321
5322 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
5323
5324 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5325 pcie_set_readrq(tp->pdev, 4096);
5326 } else {
5327 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5328 tp->pci_cacheline_sz);
5329 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5330 tp->pci_lat_timer);
5331 }
5332
5333 /* Make sure PCI-X relaxed ordering bit is clear. */
5334 if (tp->pcix_cap) {
5335 u16 pcix_cmd;
5336
5337 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5338 &pcix_cmd);
5339 pcix_cmd &= ~PCI_X_CMD_ERO;
5340 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5341 pcix_cmd);
5342 }
5343
5344 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5345
5346 /* Chip reset on 5780 will reset MSI enable bit,
5347 * so need to restore it.
5348 */
5349 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5350 u16 ctrl;
5351
5352 pci_read_config_word(tp->pdev,
5353 tp->msi_cap + PCI_MSI_FLAGS,
5354 &ctrl);
5355 pci_write_config_word(tp->pdev,
5356 tp->msi_cap + PCI_MSI_FLAGS,
5357 ctrl | PCI_MSI_FLAGS_ENABLE);
5358 val = tr32(MSGINT_MODE);
5359 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5360 }
5361 }
5362 }
5363
5364 static void tg3_stop_fw(struct tg3 *);
5365
5366 /* tp->lock is held. */
5367 static int tg3_chip_reset(struct tg3 *tp)
5368 {
5369 u32 val;
5370 void (*write_op)(struct tg3 *, u32, u32);
5371 int err;
5372
5373 tg3_nvram_lock(tp);
5374
5375 /* No matching tg3_nvram_unlock() after this because
5376 * chip reset below will undo the nvram lock.
5377 */
5378 tp->nvram_lock_cnt = 0;
5379
5380 /* GRC_MISC_CFG core clock reset will clear the memory
5381 * enable bit in PCI register 4 and the MSI enable bit
5382 * on some chips, so we save relevant registers here.
5383 */
5384 tg3_save_pci_state(tp);
5385
5386 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
5387 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
5388 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
5389 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
5390 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
5391 tw32(GRC_FASTBOOT_PC, 0);
5392
5393 /*
5394 * We must avoid the readl() that normally takes place.
5395 * It locks machines, causes machine checks, and other
5396 * fun things. So, temporarily disable the 5701
5397 * hardware workaround, while we do the reset.
5398 */
5399 write_op = tp->write32;
5400 if (write_op == tg3_write_flush_reg32)
5401 tp->write32 = tg3_write32;
5402
5403 /* Prevent the irq handler from reading or writing PCI registers
5404 * during chip reset when the memory enable bit in the PCI command
5405 * register may be cleared. The chip does not generate interrupt
5406 * at this time, but the irq handler may still be called due to irq
5407 * sharing or irqpoll.
5408 */
5409 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
5410 if (tp->hw_status) {
5411 tp->hw_status->status = 0;
5412 tp->hw_status->status_tag = 0;
5413 }
5414 tp->last_tag = 0;
5415 smp_mb();
5416 synchronize_irq(tp->pdev->irq);
5417
5418 /* do the reset */
5419 val = GRC_MISC_CFG_CORECLK_RESET;
5420
5421 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5422 if (tr32(0x7e2c) == 0x60) {
5423 tw32(0x7e2c, 0x20);
5424 }
5425 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5426 tw32(GRC_MISC_CFG, (1 << 29));
5427 val |= (1 << 29);
5428 }
5429 }
5430
5431 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5432 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5433 tw32(GRC_VCPU_EXT_CTRL,
5434 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5435 }
5436
5437 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5438 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5439 tw32(GRC_MISC_CFG, val);
5440
5441 /* restore 5701 hardware bug workaround write method */
5442 tp->write32 = write_op;
5443
5444 /* Unfortunately, we have to delay before the PCI read back.
5445 * Some 575X chips will not even respond to a PCI cfg access
5446 * when the reset command is given to the chip.
5447 *
5448 * How do these hardware designers expect things to work
5449 * properly if the PCI write is posted for a long period
5450 * of time? It is always necessary to have some method by
5451 * which a register read back can occur to push the write
5452 * out which does the reset.
5453 *
5454 * For most tg3 variants the trick below was working.
5455 * Ho hum...
5456 */
5457 udelay(120);
5458
5459 /* Flush PCI posted writes. The normal MMIO registers
5460 * are inaccessible at this time so this is the only
5461 * way to do this reliably (actually, this is no longer
5462 * the case, see above). I tried to use indirect
5463 * register read/write but this upset some 5701 variants.
5464 */
5465 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
5466
5467 udelay(120);
5468
5469 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5470 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
5471 int i;
5472 u32 cfg_val;
5473
5474 /* Wait for link training to complete. */
5475 for (i = 0; i < 5000; i++)
5476 udelay(100);
5477
5478 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
5479 pci_write_config_dword(tp->pdev, 0xc4,
5480 cfg_val | (1 << 15));
5481 }
5482 /* Set PCIE max payload size and clear error status. */
5483 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
5484 }
5485
5486 tg3_restore_pci_state(tp);
5487
5488 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
5489
5490 val = 0;
5491 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5492 val = tr32(MEMARB_MODE);
5493 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
5494
5495 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
5496 tg3_stop_fw(tp);
5497 tw32(0x5000, 0x400);
5498 }
5499
5500 tw32(GRC_MODE, tp->grc_mode);
5501
5502 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
5503 val = tr32(0xc4);
5504
5505 tw32(0xc4, val | (1 << 15));
5506 }
5507
5508 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
5509 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5510 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
5511 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
5512 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
5513 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5514 }
5515
5516 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5517 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
5518 tw32_f(MAC_MODE, tp->mac_mode);
5519 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5520 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
5521 tw32_f(MAC_MODE, tp->mac_mode);
5522 } else
5523 tw32_f(MAC_MODE, 0);
5524 udelay(40);
5525
5526 err = tg3_poll_fw(tp);
5527 if (err)
5528 return err;
5529
5530 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
5531 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5532 val = tr32(0x7c00);
5533
5534 tw32(0x7c00, val | (1 << 25));
5535 }
5536
5537 /* Reprobe ASF enable state. */
5538 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
5539 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
5540 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
5541 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
5542 u32 nic_cfg;
5543
5544 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
5545 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
5546 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
5547 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
5548 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
5549 }
5550 }
5551
5552 return 0;
5553 }
5554
5555 /* tp->lock is held. */
5556 static void tg3_stop_fw(struct tg3 *tp)
5557 {
5558 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
5559 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
5560 u32 val;
5561
5562 /* Wait for RX cpu to ACK the previous event. */
5563 tg3_wait_for_event_ack(tp);
5564
5565 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5566 val = tr32(GRC_RX_CPU_EVENT);
5567 val |= GRC_RX_CPU_DRIVER_EVENT;
5568 tw32(GRC_RX_CPU_EVENT, val);
5569
5570 /* Wait for RX cpu to ACK this event. */
5571 tg3_wait_for_event_ack(tp);
5572 }
5573 }
5574
5575 /* tp->lock is held. */
5576 static int tg3_halt(struct tg3 *tp, int kind, int silent)
5577 {
5578 int err;
5579
5580 tg3_stop_fw(tp);
5581
5582 tg3_write_sig_pre_reset(tp, kind);
5583
5584 tg3_abort_hw(tp, silent);
5585 err = tg3_chip_reset(tp);
5586
5587 tg3_write_sig_legacy(tp, kind);
5588 tg3_write_sig_post_reset(tp, kind);
5589
5590 if (err)
5591 return err;
5592
5593 return 0;
5594 }
5595
5596 #define TG3_FW_RELEASE_MAJOR 0x0
5597 #define TG3_FW_RELEASE_MINOR 0x0
5598 #define TG3_FW_RELEASE_FIX 0x0
5599 #define TG3_FW_START_ADDR 0x08000000
5600 #define TG3_FW_TEXT_ADDR 0x08000000
5601 #define TG3_FW_TEXT_LEN 0x9c0
5602 #define TG3_FW_RODATA_ADDR 0x080009c0
5603 #define TG3_FW_RODATA_LEN 0x60
5604 #define TG3_FW_DATA_ADDR 0x08000a40
5605 #define TG3_FW_DATA_LEN 0x20
5606 #define TG3_FW_SBSS_ADDR 0x08000a60
5607 #define TG3_FW_SBSS_LEN 0xc
5608 #define TG3_FW_BSS_ADDR 0x08000a70
5609 #define TG3_FW_BSS_LEN 0x10
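
/* Layout sketch derived from the #defines above (small alignment gaps
 * omitted): .text at 0x08000000 (0x9c0 bytes), .rodata at 0x080009c0
 * (0x60), .data at 0x08000a40 (0x20), .sbss at 0x08000a60 (0xc) and
 * .bss at 0x08000a70 (0x10).  Only .text and .rodata are carried in
 * the arrays below; .data is all zeros and is compiled out.
 */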
5610
5611 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
5612 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
5613 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
5614 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
5615 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
5616 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
5617 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
5618 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
5619 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
5620 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
5621 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
5622 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
5623 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
5624 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
5625 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
5626 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
5627 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5628 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
5629 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
5630 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
5631 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5632 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
5633 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
5634 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5635 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5636 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5637 0, 0, 0, 0, 0, 0,
5638 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
5639 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5640 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5641 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5642 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
5643 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
5644 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
5645 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
5646 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5647 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5648 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
5649 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5650 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5651 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5652 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
5653 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
5654 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
5655 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
5656 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
5657 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
5658 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
5659 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
5660 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
5661 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
5662 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
5663 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
5664 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
5665 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
5666 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
5667 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
5668 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
5669 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
5670 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
5671 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
5672 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
5673 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
5674 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
5675 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5676 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5677 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5678 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5679 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5680 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5681 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5682 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5683 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5684 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5685 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5686 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5687 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5688 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5689 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5690 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5691 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5692 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5693 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5694 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5695 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5696 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5697 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5698 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5699 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5700 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5701 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5702 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5703 };
5704
5705 static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
5706 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
5707 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
5708 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5709 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
5710 0x00000000
5711 };
5712
5713 #if 0 /* All zeros, don't eat up space with it. */
5714 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5715 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5716 0x00000000, 0x00000000, 0x00000000, 0x00000000
5717 };
5718 #endif
5719
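/* On-chip SRAM windows used by tg3_load_firmware_cpu() to stage
 * firmware images for the RX and TX CPUs.
 */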
5720 #define RX_CPU_SCRATCH_BASE 0x30000
5721 #define RX_CPU_SCRATCH_SIZE 0x04000
5722 #define TX_CPU_SCRATCH_BASE 0x34000
5723 #define TX_CPU_SCRATCH_SIZE 0x04000
5724
5725 /* tp->lock is held. */
5726 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5727 {
5728 int i;
5729
5730 BUG_ON(offset == TX_CPU_BASE &&
5731 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5732
5733 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
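		/* The 5906 exposes a single virtual CPU that is halted
		 * through the GRC block instead of CPU_MODE.
		 */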
5734 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5735
5736 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5737 return 0;
5738 }
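	/* The RX CPU gets a final forced halt and a short settle delay
	 * below; for the TX CPU the halt loop alone is sufficient.
	 */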
5739 if (offset == RX_CPU_BASE) {
5740 for (i = 0; i < 10000; i++) {
5741 tw32(offset + CPU_STATE, 0xffffffff);
5742 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5743 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5744 break;
5745 }
5746
5747 tw32(offset + CPU_STATE, 0xffffffff);
5748 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
5749 udelay(10);
5750 } else {
5751 for (i = 0; i < 10000; i++) {
5752 tw32(offset + CPU_STATE, 0xffffffff);
5753 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5754 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5755 break;
5756 }
5757 }
5758
5759 if (i >= 10000) {
		printk(KERN_ERR PFX "tg3_halt_cpu timed out halting "
		       "the %s CPU for %s\n",
		       (offset == RX_CPU_BASE ? "RX" : "TX"),
		       tp->dev->name);
5764 return -ENODEV;
5765 }
5766
5767 /* Clear firmware's nvram arbitration. */
5768 if (tp->tg3_flags & TG3_FLAG_NVRAM)
5769 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5770 return 0;
5771 }
5772
struct fw_info {
	unsigned int	text_base;	/* load address of the .text section */
	unsigned int	text_len;	/* .text length, in bytes */
	const u32	*text_data;	/* .text image, or NULL to zero-fill */
	unsigned int	rodata_base;	/* load address of the .rodata section */
	unsigned int	rodata_len;	/* .rodata length, in bytes */
	const u32	*rodata_data;	/* .rodata image, or NULL to zero-fill */
	unsigned int	data_base;	/* load address of the .data section */
	unsigned int	data_len;	/* .data length, in bytes */
	const u32	*data_data;	/* .data image, or NULL to zero-fill */
};
5784
5785 /* tp->lock is held. */
5786 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5787 int cpu_scratch_size, struct fw_info *info)
5788 {
5789 int err, lock_err, i;
5790 void (*write_op)(struct tg3 *, u32, u32);
5791
5792 if (cpu_base == TX_CPU_BASE &&
5793 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
		       "TX CPU firmware on %s, which is a 5705-class chip.\n",
		       tp->dev->name);
5797 return -EINVAL;
5798 }
5799
5800 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5801 write_op = tg3_write_mem;
5802 else
5803 write_op = tg3_write_indirect_reg32;
5804
5805 /* It is possible that bootcode is still loading at this point.
5806 * Get the nvram lock first before halting the cpu.
5807 */
5808 lock_err = tg3_nvram_lock(tp);
5809 err = tg3_halt_cpu(tp, cpu_base);
5810 if (!lock_err)
5811 tg3_nvram_unlock(tp);
5812 if (err)
5813 goto out;
5814
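	/* Zero the whole scratch window, then copy each section of the
	 * image to its link-time offset within the window.
	 */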
5815 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5816 write_op(tp, cpu_scratch_base + i, 0);
5817 tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
5819 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5820 write_op(tp, (cpu_scratch_base +
5821 (info->text_base & 0xffff) +
5822 (i * sizeof(u32))),
5823 (info->text_data ?
5824 info->text_data[i] : 0));
5825 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5826 write_op(tp, (cpu_scratch_base +
5827 (info->rodata_base & 0xffff) +
5828 (i * sizeof(u32))),
5829 (info->rodata_data ?
5830 info->rodata_data[i] : 0));
5831 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5832 write_op(tp, (cpu_scratch_base +
5833 (info->data_base & 0xffff) +
5834 (i * sizeof(u32))),
5835 (info->data_data ?
5836 info->data_data[i] : 0));
5837
5838 err = 0;
5839
5840 out:
5841 return err;
5842 }
5843
5844 /* tp->lock is held. */
5845 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5846 {
5847 struct fw_info info;
5848 int err, i;
5849
5850 info.text_base = TG3_FW_TEXT_ADDR;
5851 info.text_len = TG3_FW_TEXT_LEN;
5852 info.text_data = &tg3FwText[0];
5853 info.rodata_base = TG3_FW_RODATA_ADDR;
5854 info.rodata_len = TG3_FW_RODATA_LEN;
5855 info.rodata_data = &tg3FwRodata[0];
5856 info.data_base = TG3_FW_DATA_ADDR;
5857 info.data_len = TG3_FW_DATA_LEN;
5858 info.data_data = NULL;
5859
5860 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5861 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5862 &info);
5863 if (err)
5864 return err;
5865
5866 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5867 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5868 &info);
5869 if (err)
5870 return err;
5871
	/* Now start up only the RX CPU. */
5873 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5874 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
5875
5876 for (i = 0; i < 5; i++) {
5877 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5878 break;
5879 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5880 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
5881 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
5882 udelay(1000);
5883 }
5884 if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_5701_a0_firmware_fix failed "
		       "to set RX CPU PC for %s: is %08x, should be %08x\n",
		       tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
		       TG3_FW_TEXT_ADDR);
5889 return -ENODEV;
5890 }
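	/* PC is confirmed; release the RX CPU from halt. */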
5891 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5892 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
5893
5894 return 0;
5895 }
5896
5897
5898 #define TG3_TSO_FW_RELEASE_MAJOR 0x1
#define TG3_TSO_FW_RELEASE_MINOR	0x6
5900 #define TG3_TSO_FW_RELEASE_FIX 0x0
5901 #define TG3_TSO_FW_START_ADDR 0x08000000
5902 #define TG3_TSO_FW_TEXT_ADDR 0x08000000
5903 #define TG3_TSO_FW_TEXT_LEN 0x1aa0
5904 #define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
5905 #define TG3_TSO_FW_RODATA_LEN 0x60
5906 #define TG3_TSO_FW_DATA_ADDR 0x08001b20
5907 #define TG3_TSO_FW_DATA_LEN 0x30
5908 #define TG3_TSO_FW_SBSS_ADDR 0x08001b50
5909 #define TG3_TSO_FW_SBSS_LEN 0x2c
5910 #define TG3_TSO_FW_BSS_ADDR 0x08001b80
5911 #define TG3_TSO_FW_BSS_LEN 0x894
5912
5913 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5914 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5915 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5916 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5917 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5918 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5919 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5920 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5921 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5922 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5923 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5924 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5925 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5926 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5927 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5928 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5929 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5930 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5931 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5932 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5933 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5934 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5935 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5936 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5937 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5938 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5939 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5940 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5941 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5942 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5943 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5944 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5945 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5946 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5947 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5948 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5949 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5950 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5951 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5952 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5953 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5954 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5955 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5956 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5957 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5958 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5959 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5960 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5961 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5962 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5963 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5964 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5965 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5966 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5967 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5968 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5969 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5970 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5971 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5972 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5973 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5974 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5975 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5976 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5977 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5978 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5979 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5980 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5981 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5982 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5983 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5984 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5985 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5986 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5987 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5988 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5989 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5990 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5991 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5992 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5993 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5994 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5995 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5996 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5997 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5998 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5999 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
6000 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
6001 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
6002 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
6003 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
6004 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
6005 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
6006 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
6007 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
6008 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
6009 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
6010 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
6011 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
6012 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
6013 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
6014 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
6015 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
6016 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
6017 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
6018 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
6019 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
6020 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
6021 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
6022 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
6023 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
6024 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
6025 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
6026 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
6027 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
6028 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
6029 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
6030 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
6031 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
6032 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
6033 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
6034 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
6035 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
6036 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
6037 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
6038 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
6039 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
6040 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
6041 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
6042 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
6043 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
6044 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
6045 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
6046 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
6047 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
6048 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
6049 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
6050 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
6051 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
6052 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6053 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
6054 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
6055 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
6056 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
6057 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6058 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6059 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6060 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6061 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6062 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6063 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6064 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6065 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6066 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6067 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6068 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6069 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6070 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6071 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6072 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6073 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6074 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6075 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6076 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6077 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6078 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6079 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6080 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6081 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6082 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6083 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6084 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6085 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6086 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6087 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6088 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6089 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6090 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6091 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6092 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6093 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6094 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6095 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6096 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6097 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6098 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6099 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6100 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6101 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6102 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6103 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6104 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6105 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6106 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6107 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6108 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6109 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6110 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6111 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6112 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6113 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6114 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6115 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6116 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6117 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6118 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6119 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6120 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6121 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6122 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6123 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6124 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6125 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6126 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6127 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6128 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6129 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6130 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6131 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6132 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6133 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6134 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6135 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6136 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6137 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6138 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6139 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6140 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6141 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6142 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6143 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6144 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6145 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6146 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6147 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6148 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6149 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6150 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6151 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6152 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6153 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6154 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6155 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6156 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6157 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6158 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6159 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6160 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6161 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6162 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6163 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6164 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6165 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6166 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6167 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6168 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6169 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6170 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6171 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6172 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6173 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6174 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6175 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6176 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6177 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6178 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6179 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6180 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6181 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6182 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6183 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6184 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6185 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6186 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6187 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6188 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6189 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6190 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6191 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6192 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6193 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6194 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6195 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6196 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6197 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6198 };
6199
static const u32 tg3TsoFwRodata[(TG3_TSO_FW_RODATA_LEN / 4) + 1] = {
6201 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6202 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
6203 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
6204 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
6205 0x00000000,
6206 };
6207
static const u32 tg3TsoFwData[(TG3_TSO_FW_DATA_LEN / 4) + 1] = {
6209 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
6210 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6211 0x00000000,
6212 };
6213
6214 /* 5705 needs a special version of the TSO firmware. */
6215 #define TG3_TSO5_FW_RELEASE_MAJOR 0x1
#define TG3_TSO5_FW_RELEASE_MINOR	0x2
6217 #define TG3_TSO5_FW_RELEASE_FIX 0x0
6218 #define TG3_TSO5_FW_START_ADDR 0x00010000
6219 #define TG3_TSO5_FW_TEXT_ADDR 0x00010000
6220 #define TG3_TSO5_FW_TEXT_LEN 0xe90
6221 #define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
6222 #define TG3_TSO5_FW_RODATA_LEN 0x50
6223 #define TG3_TSO5_FW_DATA_ADDR 0x00010f00
6224 #define TG3_TSO5_FW_DATA_LEN 0x20
6225 #define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
6226 #define TG3_TSO5_FW_SBSS_LEN 0x28
6227 #define TG3_TSO5_FW_BSS_ADDR 0x00010f50
6228 #define TG3_TSO5_FW_BSS_LEN 0x88
6229
6230 static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
6231 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
6232 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
6233 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6234 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
6235 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
6236 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
6237 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6238 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
6239 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
6240 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
6241 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
6242 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
6243 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
6244 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
6245 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
6246 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
6247 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
6248 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
6249 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
6250 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
6251 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
6252 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
6253 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
6254 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
6255 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
6256 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
6257 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
6258 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
6259 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
6260 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
6261 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6262 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
6263 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
6264 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6265 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6266 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6267 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6268 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6269 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6270 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6271 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6272 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6273 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6274 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6275 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6276 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6277 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6278 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6279 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6280 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6281 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6282 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6283 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6284 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6285 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6286 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6287 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6288 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6289 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6290 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6291 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6292 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6293 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6294 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6295 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6296 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6297 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6298 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6299 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6300 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6301 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6302 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6303 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6304 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6305 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6306 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6307 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6308 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6309 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6310 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6311 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6312 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6313 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6314 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6315 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6316 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6317 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6318 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6319 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6320 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6321 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6322 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6323 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6324 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6325 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6326 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6327 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6328 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6329 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6330 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6331 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6332 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6333 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6334 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6335 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6336 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6337 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6338 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6339 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6340 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6341 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6342 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6343 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6344 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6345 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6346 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6347 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6348 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6349 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6350 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6351 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6352 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6353 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6354 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6355 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6356 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6357 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6358 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6359 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6360 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6361 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6362 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6363 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6364 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6365 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6366 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6367 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6368 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6369 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6370 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6371 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6372 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6373 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6374 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6375 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6376 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6377 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6378 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6379 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6380 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6381 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6382 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6383 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6384 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6385 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6386 0x00000000, 0x00000000, 0x00000000,
6387 };
6388
6389 static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
6390 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6391 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
6392 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6393 0x00000000, 0x00000000, 0x00000000,
6394 };
6395
6396 static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
6397 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
6398 0x00000000, 0x00000000, 0x00000000,
6399 };
6400
6401 /* tp->lock is held. */
6402 static int tg3_load_tso_firmware(struct tg3 *tp)
6403 {
6404 struct fw_info info;
6405 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6406 int err, i;
6407
6408 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6409 return 0;
6410
6411 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6412 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6413 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6414 info.text_data = &tg3Tso5FwText[0];
6415 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6416 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6417 info.rodata_data = &tg3Tso5FwRodata[0];
6418 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6419 info.data_len = TG3_TSO5_FW_DATA_LEN;
6420 info.data_data = &tg3Tso5FwData[0];
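		/* The 5705 has no TX CPU; run the TSO firmware on the RX
		 * CPU and carve its scratch space out of the MBUF pool.
		 */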
6421 cpu_base = RX_CPU_BASE;
6422 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6423 cpu_scratch_size = (info.text_len +
6424 info.rodata_len +
6425 info.data_len +
6426 TG3_TSO5_FW_SBSS_LEN +
6427 TG3_TSO5_FW_BSS_LEN);
6428 } else {
6429 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6430 info.text_len = TG3_TSO_FW_TEXT_LEN;
6431 info.text_data = &tg3TsoFwText[0];
6432 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6433 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6434 info.rodata_data = &tg3TsoFwRodata[0];
6435 info.data_base = TG3_TSO_FW_DATA_ADDR;
6436 info.data_len = TG3_TSO_FW_DATA_LEN;
6437 info.data_data = &tg3TsoFwData[0];
6438 cpu_base = TX_CPU_BASE;
6439 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6440 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6441 }
6442
6443 err = tg3_load_firmware_cpu(tp, cpu_base,
6444 cpu_scratch_base, cpu_scratch_size,
6445 &info);
6446 if (err)
6447 return err;
6448
	/* Now start up the CPU. */
6450 tw32(cpu_base + CPU_STATE, 0xffffffff);
6451 tw32_f(cpu_base + CPU_PC, info.text_base);
6452
6453 for (i = 0; i < 5; i++) {
6454 if (tr32(cpu_base + CPU_PC) == info.text_base)
6455 break;
6456 tw32(cpu_base + CPU_STATE, 0xffffffff);
6457 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
6458 tw32_f(cpu_base + CPU_PC, info.text_base);
6459 udelay(1000);
6460 }
6461 if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_tso_firmware failed to set "
		       "CPU PC for %s: is %08x, should be %08x\n",
		       tp->dev->name, tr32(cpu_base + CPU_PC),
		       info.text_base);
6466 return -ENODEV;
6467 }
6468 tw32(cpu_base + CPU_STATE, 0xffffffff);
6469 tw32_f(cpu_base + CPU_MODE, 0x00000000);
6470 return 0;
6471 }
6472
6473
6474 /* tp->lock is held. */
6475 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6476 {
6477 u32 addr_high, addr_low;
6478 int i;
6479
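	/* The MAC address is programmed as two words: the high word
	 * carries bytes 0-1 and the low word bytes 2-5, most significant
	 * byte first.  E.g. 00:10:18:a1:b2:c3 becomes addr_high
	 * 0x00000010 and addr_low 0x18a1b2c3.
	 */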
6480 addr_high = ((tp->dev->dev_addr[0] << 8) |
6481 tp->dev->dev_addr[1]);
6482 addr_low = ((tp->dev->dev_addr[2] << 24) |
6483 (tp->dev->dev_addr[3] << 16) |
6484 (tp->dev->dev_addr[4] << 8) |
6485 (tp->dev->dev_addr[5] << 0));
6486 for (i = 0; i < 4; i++) {
6487 if (i == 1 && skip_mac_1)
6488 continue;
6489 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6490 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6491 }
6492
6493 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6494 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6495 for (i = 0; i < 12; i++) {
6496 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6497 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6498 }
6499 }
6500
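	/* Seed the transmit backoff generator from the byte sum of the
	 * MAC address so different NICs tend to pick different slots.
	 */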
6501 addr_high = (tp->dev->dev_addr[0] +
6502 tp->dev->dev_addr[1] +
6503 tp->dev->dev_addr[2] +
6504 tp->dev->dev_addr[3] +
6505 tp->dev->dev_addr[4] +
6506 tp->dev->dev_addr[5]) &
6507 TX_BACKOFF_SEED_MASK;
6508 tw32(MAC_TX_BACKOFF_SEED, addr_high);
6509 }
6510
6511 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6512 {
6513 struct tg3 *tp = netdev_priv(dev);
6514 struct sockaddr *addr = p;
6515 int err = 0, skip_mac_1 = 0;
6516
6517 if (!is_valid_ether_addr(addr->sa_data))
6518 return -EINVAL;
6519
6520 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6521
6522 if (!netif_running(dev))
6523 return 0;
6524
6525 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6526 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6527
6528 addr0_high = tr32(MAC_ADDR_0_HIGH);
6529 addr0_low = tr32(MAC_ADDR_0_LOW);
6530 addr1_high = tr32(MAC_ADDR_1_HIGH);
6531 addr1_low = tr32(MAC_ADDR_1_LOW);
6532
6533 /* Skip MAC addr 1 if ASF is using it. */
6534 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6535 !(addr1_high == 0 && addr1_low == 0))
6536 skip_mac_1 = 1;
6537 }
6538 spin_lock_bh(&tp->lock);
6539 __tg3_set_mac_addr(tp, skip_mac_1);
6540 spin_unlock_bh(&tp->lock);
6541
6542 return err;
6543 }
6544
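/* Write one TG3_BDINFO block in NIC SRAM: the 64-bit host DMA address
 * of the ring, the ring's maxlen/flags word and, on pre-5705 chips,
 * the NIC-memory address of the descriptors.
 */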
6545 /* tp->lock is held. */
6546 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6547 dma_addr_t mapping, u32 maxlen_flags,
6548 u32 nic_addr)
6549 {
6550 tg3_write_mem(tp,
6551 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6552 ((u64) mapping >> 32));
6553 tg3_write_mem(tp,
6554 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6555 ((u64) mapping & 0xffffffff));
6556 tg3_write_mem(tp,
6557 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6558 maxlen_flags);
6559
6560 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6561 tg3_write_mem(tp,
6562 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6563 nic_addr);
6564 }
6565
6566 static void __tg3_set_rx_mode(struct net_device *);
6567 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6568 {
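	/* Program host coalescing from the ethtool parameters; 5705 and
	 * newer chips lack the per-interrupt tick and statistics
	 * coalescing registers, so skip those there.
	 */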
6569 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6570 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6571 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6572 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6573 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6574 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6575 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6576 }
6577 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6578 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6579 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6580 u32 val = ec->stats_block_coalesce_usecs;
6581
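		/* Suspend periodic statistics block updates while the
		 * link is down.
		 */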
6582 if (!netif_carrier_ok(tp->dev))
6583 val = 0;
6584
6585 tw32(HOSTCC_STAT_COAL_TICKS, val);
6586 }
6587 }
6588
6589 /* tp->lock is held. */
6590 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6591 {
6592 u32 val, rdmac_mode;
6593 int i, err, limit;
6594
6595 tg3_disable_ints(tp);
6596
6597 tg3_stop_fw(tp);
6598
6599 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6600
	if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
		tg3_abort_hw(tp, 1);
6604
6605 if (reset_phy)
6606 tg3_phy_reset(tp);
6607
6608 err = tg3_chip_reset(tp);
6609 if (err)
6610 return err;
6611
6612 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6613
6614 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
6615 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
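		/* On 5784 A0/A1, take the CPMU out of link-aware/link-idle
		 * mode and pin the 10Mb, link-aware and host-access MAC
		 * clocks at 6.25MHz.
		 */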
6616 val = tr32(TG3_CPMU_CTRL);
6617 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6618 tw32(TG3_CPMU_CTRL, val);
6619
6620 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
6621 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
6622 val |= CPMU_LSPD_10MB_MACCLK_6_25;
6623 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
6624
6625 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
6626 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
6627 val |= CPMU_LNK_AWARE_MACCLK_6_25;
6628 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
6629
6630 val = tr32(TG3_CPMU_HST_ACC);
6631 val &= ~CPMU_HST_ACC_MACCLK_MASK;
6632 val |= CPMU_HST_ACC_MACCLK_6_25;
6633 tw32(TG3_CPMU_HST_ACC, val);
6634 }
6635
6636 /* This works around an issue with Athlon chipsets on
6637 * B3 tigon3 silicon. This bit has no effect on any
6638 * other revision. But do not set this on PCI Express
6639 * chips and don't even touch the clocks if the CPMU is present.
6640 */
6641 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6642 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6643 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6644 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6645 }
6646
6647 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6648 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6649 val = tr32(TG3PCI_PCISTATE);
6650 val |= PCISTATE_RETRY_SAME_DMA;
6651 tw32(TG3PCI_PCISTATE, val);
6652 }
6653
6654 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6655 /* Allow reads and writes to the
6656 * APE register and memory space.
6657 */
6658 val = tr32(TG3PCI_PCISTATE);
6659 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6660 PCISTATE_ALLOW_APE_SHMEM_WR;
6661 tw32(TG3PCI_PCISTATE, val);
6662 }
6663
6664 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6665 /* Enable some hw fixes. */
6666 val = tr32(TG3PCI_MSI_DATA);
6667 val |= (1 << 26) | (1 << 28) | (1 << 29);
6668 tw32(TG3PCI_MSI_DATA, val);
6669 }
6670
	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to set up the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
6676 err = tg3_init_rings(tp);
6677 if (err)
6678 return err;
6679
6680 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6681 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
6682 /* This value is determined during the probe time DMA
6683 * engine test, tg3_test_dma.
6684 */
6685 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6686 }
6687
6688 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6689 GRC_MODE_4X_NIC_SEND_RINGS |
6690 GRC_MODE_NO_TX_PHDR_CSUM |
6691 GRC_MODE_NO_RX_PHDR_CSUM);
6692 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6693
	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
6700 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6701
6702 tw32(GRC_MODE,
6703 tp->grc_mode |
6704 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6705
	/* Set up the timer prescaler register.  The clock is always 66MHz,
	 * so a prescale value of 65 divides it by 66 for a 1MHz (1 usec)
	 * timer tick.
	 */
6707 val = tr32(GRC_MISC_CFG);
6708 val &= ~0xff;
6709 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6710 tw32(GRC_MISC_CFG, val);
6711
6712 /* Initialize MBUF/DESC pool. */
6713 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6714 /* Do nothing. */
6715 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6716 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6717 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6718 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6719 else
6720 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6721 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6722 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6725 int fw_len;
6726
6727 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6728 TG3_TSO5_FW_RODATA_LEN +
6729 TG3_TSO5_FW_DATA_LEN +
6730 TG3_TSO5_FW_SBSS_LEN +
6731 TG3_TSO5_FW_BSS_LEN);
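		/* Round the firmware footprint up to the next 128-byte
		 * boundary before placing the MBUF pool behind it.
		 */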
6732 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6733 tw32(BUFMGR_MB_POOL_ADDR,
6734 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6735 tw32(BUFMGR_MB_POOL_SIZE,
6736 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6737 }
6738
6739 if (tp->dev->mtu <= ETH_DATA_LEN) {
6740 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6741 tp->bufmgr_config.mbuf_read_dma_low_water);
6742 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6743 tp->bufmgr_config.mbuf_mac_rx_low_water);
6744 tw32(BUFMGR_MB_HIGH_WATER,
6745 tp->bufmgr_config.mbuf_high_water);
6746 } else {
6747 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6748 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6749 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6750 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6751 tw32(BUFMGR_MB_HIGH_WATER,
6752 tp->bufmgr_config.mbuf_high_water_jumbo);
6753 }
6754 tw32(BUFMGR_DMA_LOW_WATER,
6755 tp->bufmgr_config.dma_low_water);
6756 tw32(BUFMGR_DMA_HIGH_WATER,
6757 tp->bufmgr_config.dma_high_water);
6758
6759 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6760 for (i = 0; i < 2000; i++) {
6761 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6762 break;
6763 udelay(10);
6764 }
6765 if (i >= 2000) {
6766 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6767 tp->dev->name);
6768 return -ENODEV;
6769 }
6770
	/* Set up the replenish threshold. */
6772 val = tp->rx_pending / 8;
6773 if (val == 0)
6774 val = 1;
6775 else if (val > tp->rx_std_max_post)
6776 val = tp->rx_std_max_post;
6777 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6778 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6779 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6780
6781 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6782 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6783 }
6784
6785 tw32(RCVBDI_STD_THRESH, val);
6786
6787 /* Initialize TG3_BDINFO's at:
6788 * RCVDBDI_STD_BD: standard eth size rx ring
6789 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
6790 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
6791 *
6792 * like so:
6793 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
6794 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
6795 * ring attribute flags
6796 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
6797 *
6798 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6799 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6800 *
6801 * The size of each ring is fixed in the firmware, but the location is
6802 * configurable.
6803 */
6804 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6805 ((u64) tp->rx_std_mapping >> 32));
6806 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6807 ((u64) tp->rx_std_mapping & 0xffffffff));
6808 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6809 NIC_SRAM_RX_BUFFER_DESC);
6810
6811 /* Don't even try to program the JUMBO/MINI buffer descriptor
6812 * configs on 5705.
6813 */
6814 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6815 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6816 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6817 } else {
6818 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6819 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6820
6821 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6822 BDINFO_FLAGS_DISABLED);
6823
6824 /* Setup replenish threshold. */
6825 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6826
6827 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6828 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6829 ((u64) tp->rx_jumbo_mapping >> 32));
6830 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6831 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6832 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6833 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6834 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6835 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6836 } else {
6837 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6838 BDINFO_FLAGS_DISABLED);
6839 }
6840
6841 }
6842
6843 /* There is only one send ring on 5705/5750, no need to explicitly
6844 * disable the others.
6845 */
6846 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6847 /* Clear out send RCB ring in SRAM. */
6848 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6849 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6850 BDINFO_FLAGS_DISABLED);
6851 }
6852
6853 tp->tx_prod = 0;
6854 tp->tx_cons = 0;
6855 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6856 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6857
6858 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6859 tp->tx_desc_mapping,
6860 (TG3_TX_RING_SIZE <<
6861 BDINFO_FLAGS_MAXLEN_SHIFT),
6862 NIC_SRAM_TX_BUFFER_DESC);
6863
6864 /* There is only one receive return ring on 5705/5750, no need
6865 * to explicitly disable the others.
6866 */
6867 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6868 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6869 i += TG3_BDINFO_SIZE) {
6870 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6871 BDINFO_FLAGS_DISABLED);
6872 }
6873 }
6874
6875 tp->rx_rcb_ptr = 0;
6876 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6877
6878 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6879 tp->rx_rcb_mapping,
6880 (TG3_RX_RCB_RING_SIZE(tp) <<
6881 BDINFO_FLAGS_MAXLEN_SHIFT),
6882 0);
6883
6884 tp->rx_std_ptr = tp->rx_pending;
6885 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6886 tp->rx_std_ptr);
6887
6888 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6889 tp->rx_jumbo_pending : 0;
6890 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6891 tp->rx_jumbo_ptr);
6892
6893 /* Initialize MAC address and backoff seed. */
6894 __tg3_set_mac_addr(tp, 0);
6895
6896 /* MTU + ethernet header + FCS + optional VLAN tag */
6897 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6898
6899 /* The slot time is changed by tg3_setup_phy if we
6900 * run at gigabit with half duplex.
6901 */
6902 tw32(MAC_TX_LENGTHS,
6903 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6904 (6 << TX_LENGTHS_IPG_SHIFT) |
6905 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6906
6907 /* Receive rules. */
6908 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6909 tw32(RCVLPC_CONFIG, 0x0181);
6910
6911 /* Calculate RDMAC_MODE setting early, we need it to determine
6912 * the RCVLPC_STATE_ENABLE mask.
6913 */
6914 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6915 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6916 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6917 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6918 RDMAC_MODE_LNGREAD_ENAB);
6919
6920 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
6921 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6922 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6923 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
6924
6925 /* If statement applies to 5705 and 5750 PCI devices only */
6926 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6927 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6928 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6929 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6930 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6931 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6932 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6933 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6934 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6935 }
6936 }
6937
6938 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6939 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6940
6941 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6942 rdmac_mode |= (1 << 27);
6943
6944 /* Receive/send statistics. */
6945 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6946 val = tr32(RCVLPC_STATS_ENABLE);
6947 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6948 tw32(RCVLPC_STATS_ENABLE, val);
6949 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6950 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6951 val = tr32(RCVLPC_STATS_ENABLE);
6952 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6953 tw32(RCVLPC_STATS_ENABLE, val);
6954 } else {
6955 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6956 }
6957 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6958 tw32(SNDDATAI_STATSENAB, 0xffffff);
6959 tw32(SNDDATAI_STATSCTRL,
6960 (SNDDATAI_SCTRL_ENABLE |
6961 SNDDATAI_SCTRL_FASTUPD));
6962
6963 /* Setup host coalescing engine. */
6964 tw32(HOSTCC_MODE, 0);
6965 for (i = 0; i < 2000; i++) {
6966 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6967 break;
6968 udelay(10);
6969 }
6970
6971 __tg3_set_coalesce(tp, &tp->coal);
6972
6973 /* set status block DMA address */
6974 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6975 ((u64) tp->status_mapping >> 32));
6976 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6977 ((u64) tp->status_mapping & 0xffffffff));
6978
6979 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6980 /* Status/statistics block address. See tg3_timer,
6981 * the tg3_periodic_fetch_stats call there, and
6982 * tg3_get_stats to see how this works for 5705/5750 chips.
6983 */
6984 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6985 ((u64) tp->stats_mapping >> 32));
6986 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6987 ((u64) tp->stats_mapping & 0xffffffff));
6988 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6989 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6990 }
6991
6992 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6993
6994 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6995 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6996 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6997 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6998
6999 /* Clear statistics/status block in chip, and status block in ram. */
7000 for (i = NIC_SRAM_STATS_BLK;
7001 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7002 i += sizeof(u32)) {
7003 tg3_write_mem(tp, i, 0);
7004 udelay(40);
7005 }
7006 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7007
7008 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7009 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7010 /* reset to prevent losing 1st rx packet intermittently */
7011 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7012 udelay(10);
7013 }
7014
7015 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7016 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
7017 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7018 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7019 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7020 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7021 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7022 udelay(40);
7023
7024 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
7025 * If TG3_FLG2_IS_NIC is zero, we should read the
7026 * register to preserve the GPIO settings for LOMs. The GPIOs,
7027 * whether used as inputs or outputs, are set by boot code after
7028 * reset.
7029 */
7030 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
7031 u32 gpio_mask;
7032
7033 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7034 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7035 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
7036
7037 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7038 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7039 GRC_LCLCTRL_GPIO_OUTPUT3;
7040
7041 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7042 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7043
7044 tp->grc_local_ctrl &= ~gpio_mask;
7045 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7046
7047 /* GPIO1 must be driven high for eeprom write protect */
7048 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7049 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7050 GRC_LCLCTRL_GPIO_OUTPUT1);
7051 }
7052 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7053 udelay(100);
7054
7055 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
7056 tp->last_tag = 0;
7057
7058 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7059 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7060 udelay(40);
7061 }
7062
7063 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7064 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7065 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7066 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7067 WDMAC_MODE_LNGREAD_ENAB);
7068
7069 /* If statement applies to 5705 and 5750 PCI devices only */
7070 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7071 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7072 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7073 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
7074 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7075 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7076 /* nothing */
7077 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7078 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7079 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7080 val |= WDMAC_MODE_RX_ACCEL;
7081 }
7082 }
7083
7084 /* Enable host coalescing bug fix */
7085 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
7086 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
7087 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
7088 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761))
7089 val |= WDMAC_MODE_STATUS_TAG_FIX;
7090
7091 tw32_f(WDMAC_MODE, val);
7092 udelay(40);
7093
7094 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7095 u16 pcix_cmd;
7096
7097 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7098 &pcix_cmd);
7099 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7100 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7101 pcix_cmd |= PCI_X_CMD_READ_2K;
7102 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7103 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7104 pcix_cmd |= PCI_X_CMD_READ_2K;
7105 }
7106 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7107 pcix_cmd);
7108 }
7109
7110 tw32_f(RDMAC_MODE, rdmac_mode);
7111 udelay(40);
7112
7113 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7114 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7115 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7116
7117 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7118 tw32(SNDDATAC_MODE,
7119 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7120 else
7121 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7122
7123 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7124 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7125 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7126 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7127 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7128 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7129 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7130 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7131
7132 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7133 err = tg3_load_5701_a0_firmware_fix(tp);
7134 if (err)
7135 return err;
7136 }
7137
7138 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7139 err = tg3_load_tso_firmware(tp);
7140 if (err)
7141 return err;
7142 }
7143
7144 tp->tx_mode = TX_MODE_ENABLE;
7145 tw32_f(MAC_TX_MODE, tp->tx_mode);
7146 udelay(100);
7147
7148 tp->rx_mode = RX_MODE_ENABLE;
7149 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7150 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7151 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7152
7153 tw32_f(MAC_RX_MODE, tp->rx_mode);
7154 udelay(10);
7155
7156 if (tp->link_config.phy_is_low_power) {
7157 tp->link_config.phy_is_low_power = 0;
7158 tp->link_config.speed = tp->link_config.orig_speed;
7159 tp->link_config.duplex = tp->link_config.orig_duplex;
7160 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7161 }
7162
7163 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
7164 tw32_f(MAC_MI_MODE, tp->mi_mode);
7165 udelay(80);
7166
7167 tw32(MAC_LED_CTRL, tp->led_ctrl);
7168
7169 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7170 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7171 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7172 udelay(10);
7173 }
7174 tw32_f(MAC_RX_MODE, tp->rx_mode);
7175 udelay(10);
7176
7177 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7178 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7179 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7180 /* Set drive transmission level to 1.2V */
7181 /* only if the signal pre-emphasis bit is not set */
7182 val = tr32(MAC_SERDES_CFG);
7183 val &= 0xfffff000;
7184 val |= 0x880;
7185 tw32(MAC_SERDES_CFG, val);
7186 }
7187 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7188 tw32(MAC_SERDES_CFG, 0x616000);
7189 }
7190
7191 /* Prevent chip from dropping frames when flow control
7192 * is enabled.
7193 */
7194 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7195
7196 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7197 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7198 /* Use hardware link auto-negotiation */
7199 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7200 }
7201
7202 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7203 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7204 u32 tmp;
7205
7206 tmp = tr32(SERDES_RX_CTRL);
7207 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7208 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7209 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7210 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7211 }
7212
7213 err = tg3_setup_phy(tp, 0);
7214 if (err)
7215 return err;
7216
7217 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7218 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7219 u32 tmp;
7220
7221 /* Clear CRC stats. */
7222 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7223 tg3_writephy(tp, MII_TG3_TEST1,
7224 tmp | MII_TG3_TEST1_CRC_EN);
7225 tg3_readphy(tp, 0x14, &tmp);
7226 }
7227 }
7228
7229 __tg3_set_rx_mode(tp->dev);
7230
7231 /* Initialize receive rules. */
7232 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
7233 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7234 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
7235 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7236
7237 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7238 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7239 limit = 8;
7240 else
7241 limit = 16;
7242 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7243 limit -= 4;
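/* Editor's note: every case below deliberately falls through, so
 * entering at "limit" zeroes all rules from limit - 1 downward
 * (rules 3 and 2 are intentionally left alone).
 */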
7244 switch (limit) {
7245 case 16:
7246 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
7247 case 15:
7248 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
7249 case 14:
7250 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
7251 case 13:
7252 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
7253 case 12:
7254 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
7255 case 11:
7256 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
7257 case 10:
7258 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
7259 case 9:
7260 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
7261 case 8:
7262 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
7263 case 7:
7264 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
7265 case 6:
7266 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
7267 case 5:
7268 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
7269 case 4:
7270 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
7271 case 3:
7272 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
7273 case 2:
7274 case 1:
7275
7276 default:
7277 break;
7278 }
7279
7280 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7281 /* Write our heartbeat update interval to APE. */
7282 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7283 APE_HOST_HEARTBEAT_INT_DISABLE);
7284
7285 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7286
7287 return 0;
7288 }
7289
7290 /* Called at device open time to get the chip ready for
7291 * packet processing. Invoked with tp->lock held.
7292 */
7293 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7294 {
7295 int err;
7296
7297 /* Force the chip into D0. */
7298 err = tg3_set_power_state(tp, PCI_D0);
7299 if (err)
7300 goto out;
7301
7302 tg3_switch_clocks(tp);
7303
7304 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7305
7306 err = tg3_reset_hw(tp, reset_phy);
7307
7308 out:
7309 return err;
7310 }
7311
7312 #define TG3_STAT_ADD32(PSTAT, REG) \
7313 do { u32 __val = tr32(REG); \
7314 (PSTAT)->low += __val; \
7315 if ((PSTAT)->low < __val) \
7316 (PSTAT)->high += 1; \
7317 } while (0)
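/* Editor's note: a standalone sketch of the carry logic in the macro
 * above, assuming a tg3_stat64_t-like { low, high } pair. The MAC
 * counters are only 32 bits wide and wrap; after the add, (low < val)
 * in unsigned arithmetic detects the wrap, so high accumulates the
 * carries and the pair widens the counter to 64 bits in software.
 */
#if 0	/* illustration only, not built */
static inline void stat64_add32(u32 *low, u32 *high, u32 val)
{
	*low += val;
	if (*low < val)		/* unsigned overflow => carry out */
		(*high)++;
}
#endif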
7318
7319 static void tg3_periodic_fetch_stats(struct tg3 *tp)
7320 {
7321 struct tg3_hw_stats *sp = tp->hw_stats;
7322
7323 if (!netif_carrier_ok(tp->dev))
7324 return;
7325
7326 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7327 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7328 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7329 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7330 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7331 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7332 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7333 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7334 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7335 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7336 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7337 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7338 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7339
7340 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7341 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7342 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7343 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7344 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7345 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7346 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7347 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7348 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7349 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7350 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7351 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7352 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7353 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
7354
7355 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7356 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7357 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
7358 }
7359
7360 static void tg3_timer(unsigned long __opaque)
7361 {
7362 struct tg3 *tp = (struct tg3 *) __opaque;
7363
7364 if (tp->irq_sync)
7365 goto restart_timer;
7366
7367 spin_lock(&tp->lock);
7368
7369 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7370 /* All of this is necessary because, when using non-tagged
7371 * IRQ status, the mailbox/status_block protocol the chip
7372 * uses with the CPU is race prone.
7373 */
7374 if (tp->hw_status->status & SD_STATUS_UPDATED) {
7375 tw32(GRC_LOCAL_CTRL,
7376 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7377 } else {
7378 tw32(HOSTCC_MODE, tp->coalesce_mode |
7379 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
7380 }
7381
7382 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7383 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
7384 spin_unlock(&tp->lock);
7385 schedule_work(&tp->reset_task);
7386 return;
7387 }
7388 }
7389
7390 /* This part only runs once per second. */
7391 if (!--tp->timer_counter) {
7392 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7393 tg3_periodic_fetch_stats(tp);
7394
7395 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7396 u32 mac_stat;
7397 int phy_event;
7398
7399 mac_stat = tr32(MAC_STATUS);
7400
7401 phy_event = 0;
7402 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7403 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7404 phy_event = 1;
7405 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7406 phy_event = 1;
7407
7408 if (phy_event)
7409 tg3_setup_phy(tp, 0);
7410 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7411 u32 mac_stat = tr32(MAC_STATUS);
7412 int need_setup = 0;
7413
7414 if (netif_carrier_ok(tp->dev) &&
7415 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7416 need_setup = 1;
7417 }
7418 if (!netif_carrier_ok(tp->dev) &&
7419 (mac_stat & (MAC_STATUS_PCS_SYNCED |
7420 MAC_STATUS_SIGNAL_DET))) {
7421 need_setup = 1;
7422 }
7423 if (need_setup) {
7424 if (!tp->serdes_counter) {
7425 tw32_f(MAC_MODE,
7426 (tp->mac_mode &
7427 ~MAC_MODE_PORT_MODE_MASK));
7428 udelay(40);
7429 tw32_f(MAC_MODE, tp->mac_mode);
7430 udelay(40);
7431 }
7432 tg3_setup_phy(tp, 0);
7433 }
7434 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7435 tg3_serdes_parallel_detect(tp);
7436
7437 tp->timer_counter = tp->timer_multiplier;
7438 }
7439
7440 /* Heartbeat is only sent once every 2 seconds.
7441 *
7442 * The heartbeat is to tell the ASF firmware that the host
7443 * driver is still alive. In the event that the OS crashes,
7444 * ASF needs to reset the hardware to free up the FIFO space
7445 * that may be filled with rx packets destined for the host.
7446 * If the FIFO is full, ASF will no longer function properly.
7447 *
7448 * Unintended resets have been reported on real time kernels
7449 * where the timer doesn't run on time. Netpoll will also have
7450 * the same problem.
7451 *
7452 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7453 * to check the ring condition when the heartbeat is expiring
7454 * before doing the reset. This will prevent most unintended
7455 * resets.
7456 */
7457 if (!--tp->asf_counter) {
7458 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7459 u32 val;
7460
7461 tg3_wait_for_event_ack(tp);
7462
7463 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
7464 FWCMD_NICDRV_ALIVE3);
7465 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
7466 /* 5 second timeout */
7467 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
7468 val = tr32(GRC_RX_CPU_EVENT);
7469 val |= GRC_RX_CPU_DRIVER_EVENT;
7470 tw32_f(GRC_RX_CPU_EVENT, val);
7471 }
7472 tp->asf_counter = tp->asf_multiplier;
7473 }
7474
7475 spin_unlock(&tp->lock);
7476
7477 restart_timer:
7478 tp->timer.expires = jiffies + tp->timer_offset;
7479 add_timer(&tp->timer);
7480 }
7481
7482 static int tg3_request_irq(struct tg3 *tp)
7483 {
7484 irq_handler_t fn;
7485 unsigned long flags;
7486 struct net_device *dev = tp->dev;
7487
7488 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7489 fn = tg3_msi;
7490 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7491 fn = tg3_msi_1shot;
7492 flags = IRQF_SAMPLE_RANDOM;
7493 } else {
7494 fn = tg3_interrupt;
7495 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7496 fn = tg3_interrupt_tagged;
7497 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7498 }
7499 return request_irq(tp->pdev->irq, fn, flags, dev->name, dev);
7500 }
7501
7502 static int tg3_test_interrupt(struct tg3 *tp)
7503 {
7504 struct net_device *dev = tp->dev;
7505 int err, i, intr_ok = 0;
7506
7507 if (!netif_running(dev))
7508 return -ENODEV;
7509
7510 tg3_disable_ints(tp);
7511
7512 free_irq(tp->pdev->irq, dev);
7513
7514 err = request_irq(tp->pdev->irq, tg3_test_isr,
7515 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
7516 if (err)
7517 return err;
7518
7519 tp->hw_status->status &= ~SD_STATUS_UPDATED;
7520 tg3_enable_ints(tp);
7521
7522 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7523 HOSTCC_MODE_NOW);
7524
7525 for (i = 0; i < 5; i++) {
7526 u32 int_mbox, misc_host_ctrl;
7527
7528 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
7529 TG3_64BIT_REG_LOW);
7530 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7531
7532 if ((int_mbox != 0) ||
7533 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
7534 intr_ok = 1;
7535 break;
7536 }
7537
7538 msleep(10);
7539 }
7540
7541 tg3_disable_ints(tp);
7542
7543 free_irq(tp->pdev->irq, dev);
7544
7545 err = tg3_request_irq(tp);
7546
7547 if (err)
7548 return err;
7549
7550 if (intr_ok)
7551 return 0;
7552
7553 return -EIO;
7554 }
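/* Editor's note: the test above swaps in tg3_test_isr, forces an
 * immediate interrupt via HOSTCC_MODE_NOW, and polls for up to ~50 ms
 * (5 x 10 ms) for either a non-zero interrupt mailbox or a masked PCI
 * interrupt bit before restoring the normal handler.
 */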
7555
7556 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode
7557 * is successfully restored.
7558 */
7559 static int tg3_test_msi(struct tg3 *tp)
7560 {
7561 struct net_device *dev = tp->dev;
7562 int err;
7563 u16 pci_cmd;
7564
7565 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
7566 return 0;
7567
7568 /* Turn off SERR reporting in case MSI terminates with Master
7569 * Abort.
7570 */
7571 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7572 pci_write_config_word(tp->pdev, PCI_COMMAND,
7573 pci_cmd & ~PCI_COMMAND_SERR);
7574
7575 err = tg3_test_interrupt(tp);
7576
7577 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7578
7579 if (!err)
7580 return 0;
7581
7582 /* other failures */
7583 if (err != -EIO)
7584 return err;
7585
7586 /* MSI test failed, go back to INTx mode */
7587 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
7588 "switching to INTx mode. Please report this failure to "
7589 "the PCI maintainer and include system chipset information.\n",
7590 tp->dev->name);
7591
7592 free_irq(tp->pdev->irq, dev);
7593 pci_disable_msi(tp->pdev);
7594
7595 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7596
7597 err = tg3_request_irq(tp);
7598 if (err)
7599 return err;
7600
7601 /* Need to reset the chip because the MSI cycle may have terminated
7602 * with Master Abort.
7603 */
7604 tg3_full_lock(tp, 1);
7605
7606 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7607 err = tg3_init_hw(tp, 1);
7608
7609 tg3_full_unlock(tp);
7610
7611 if (err)
7612 free_irq(tp->pdev->irq, dev);
7613
7614 return err;
7615 }
7616
7617 static int tg3_open(struct net_device *dev)
7618 {
7619 struct tg3 *tp = netdev_priv(dev);
7620 int err;
7621
7622 netif_carrier_off(tp->dev);
7623
7624 tg3_full_lock(tp, 0);
7625
7626 err = tg3_set_power_state(tp, PCI_D0);
7627 if (err) {
7628 tg3_full_unlock(tp);
7629 return err;
7630 }
7631
7632 tg3_disable_ints(tp);
7633 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
7634
7635 tg3_full_unlock(tp);
7636
7637 /* The placement of this call is tied
7638 * to the setup and use of Host TX descriptors.
7639 */
7640 err = tg3_alloc_consistent(tp);
7641 if (err)
7642 return err;
7643
7644 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
7645 /* All MSI-supporting chips should support tagged
7646 * status. Assert that this is the case.
7647 */
7648 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7649 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
7650 "Not using MSI.\n", tp->dev->name);
7651 } else if (pci_enable_msi(tp->pdev) == 0) {
7652 u32 msi_mode;
7653
7654 msi_mode = tr32(MSGINT_MODE);
7655 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
7656 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
7657 }
7658 }
7659 err = tg3_request_irq(tp);
7660
7661 if (err) {
7662 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7663 pci_disable_msi(tp->pdev);
7664 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7665 }
7666 tg3_free_consistent(tp);
7667 return err;
7668 }
7669
7670 napi_enable(&tp->napi);
7671
7672 tg3_full_lock(tp, 0);
7673
7674 err = tg3_init_hw(tp, 1);
7675 if (err) {
7676 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7677 tg3_free_rings(tp);
7678 } else {
7679 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7680 tp->timer_offset = HZ;
7681 else
7682 tp->timer_offset = HZ / 10;
7683
7684 BUG_ON(tp->timer_offset > HZ);
7685 tp->timer_counter = tp->timer_multiplier =
7686 (HZ / tp->timer_offset);
7687 tp->asf_counter = tp->asf_multiplier =
7688 ((HZ / tp->timer_offset) * 2);
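/* Editor's note, worked example: with tagged status timer_offset = HZ,
 * so the timer fires once per second with timer_counter = 1; otherwise
 * it fires every HZ/10 ticks with a multiplier of 10. Either way the
 * once-per-second work in tg3_timer() runs at 1 Hz and the ASF
 * heartbeat at 0.5 Hz.
 */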
7689
7690 init_timer(&tp->timer);
7691 tp->timer.expires = jiffies + tp->timer_offset;
7692 tp->timer.data = (unsigned long) tp;
7693 tp->timer.function = tg3_timer;
7694 }
7695
7696 tg3_full_unlock(tp);
7697
7698 if (err) {
7699 napi_disable(&tp->napi);
7700 free_irq(tp->pdev->irq, dev);
7701 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7702 pci_disable_msi(tp->pdev);
7703 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7704 }
7705 tg3_free_consistent(tp);
7706 return err;
7707 }
7708
7709 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7710 err = tg3_test_msi(tp);
7711
7712 if (err) {
7713 tg3_full_lock(tp, 0);
7714
7715 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7716 pci_disable_msi(tp->pdev);
7717 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7718 }
7719 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7720 tg3_free_rings(tp);
7721 tg3_free_consistent(tp);
7722
7723 tg3_full_unlock(tp);
7724
7725 napi_disable(&tp->napi);
7726
7727 return err;
7728 }
7729
7730 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7731 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
7732 u32 val = tr32(PCIE_TRANSACTION_CFG);
7733
7734 tw32(PCIE_TRANSACTION_CFG,
7735 val | PCIE_TRANS_CFG_1SHOT_MSI);
7736 }
7737 }
7738 }
7739
7740 tg3_full_lock(tp, 0);
7741
7742 add_timer(&tp->timer);
7743 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
7744 tg3_enable_ints(tp);
7745
7746 tg3_full_unlock(tp);
7747
7748 netif_start_queue(dev);
7749
7750 return 0;
7751 }
7752
7753 #if 0
7754 /*static*/ void tg3_dump_state(struct tg3 *tp)
7755 {
7756 u32 val32, val32_2, val32_3, val32_4, val32_5;
7757 u16 val16;
7758 int i;
7759
7760 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
7761 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
7762 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
7763 val16, val32);
7764
7765 /* MAC block */
7766 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
7767 tr32(MAC_MODE), tr32(MAC_STATUS));
7768 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
7769 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
7770 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
7771 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
7772 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
7773 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
7774
7775 /* Send data initiator control block */
7776 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
7777 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
7778 printk(" SNDDATAI_STATSCTRL[%08x]\n",
7779 tr32(SNDDATAI_STATSCTRL));
7780
7781 /* Send data completion control block */
7782 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
7783
7784 /* Send BD ring selector block */
7785 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7786 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
7787
7788 /* Send BD initiator control block */
7789 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7790 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
7791
7792 /* Send BD completion control block */
7793 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
7794
7795 /* Receive list placement control block */
7796 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7797 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
7798 printk(" RCVLPC_STATSCTRL[%08x]\n",
7799 tr32(RCVLPC_STATSCTRL));
7800
7801 /* Receive data and receive BD initiator control block */
7802 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7803 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7804
7805 /* Receive data completion control block */
7806 printk("DEBUG: RCVDCC_MODE[%08x]\n",
7807 tr32(RCVDCC_MODE));
7808
7809 /* Receive BD initiator control block */
7810 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7811 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7812
7813 /* Receive BD completion control block */
7814 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7815 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7816
7817 /* Receive list selector control block */
7818 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7819 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7820
7821 /* Mbuf cluster free block */
7822 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7823 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7824
7825 /* Host coalescing control block */
7826 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7827 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
7828 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7829 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7830 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7831 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7832 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7833 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7834 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7835 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7836 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7837 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7838
7839 /* Memory arbiter control block */
7840 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7841 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7842
7843 /* Buffer manager control block */
7844 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7845 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7846 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7847 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7848 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7849 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7850 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7851 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7852
7853 /* Read DMA control block */
7854 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7855 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7856
7857 /* Write DMA control block */
7858 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7859 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7860
7861 /* DMA completion block */
7862 printk("DEBUG: DMAC_MODE[%08x]\n",
7863 tr32(DMAC_MODE));
7864
7865 /* GRC block */
7866 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7867 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7868 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7869 tr32(GRC_LOCAL_CTRL));
7870
7871 /* TG3_BDINFOs */
7872 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7873 tr32(RCVDBDI_JUMBO_BD + 0x0),
7874 tr32(RCVDBDI_JUMBO_BD + 0x4),
7875 tr32(RCVDBDI_JUMBO_BD + 0x8),
7876 tr32(RCVDBDI_JUMBO_BD + 0xc));
7877 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7878 tr32(RCVDBDI_STD_BD + 0x0),
7879 tr32(RCVDBDI_STD_BD + 0x4),
7880 tr32(RCVDBDI_STD_BD + 0x8),
7881 tr32(RCVDBDI_STD_BD + 0xc));
7882 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7883 tr32(RCVDBDI_MINI_BD + 0x0),
7884 tr32(RCVDBDI_MINI_BD + 0x4),
7885 tr32(RCVDBDI_MINI_BD + 0x8),
7886 tr32(RCVDBDI_MINI_BD + 0xc));
7887
7888 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7889 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7890 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7891 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7892 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7893 val32, val32_2, val32_3, val32_4);
7894
7895 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7896 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7897 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7898 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7899 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7900 val32, val32_2, val32_3, val32_4);
7901
7902 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7903 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7904 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7905 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7906 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7907 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7908 val32, val32_2, val32_3, val32_4, val32_5);
7909
7910 /* SW status block */
7911 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7912 tp->hw_status->status,
7913 tp->hw_status->status_tag,
7914 tp->hw_status->rx_jumbo_consumer,
7915 tp->hw_status->rx_consumer,
7916 tp->hw_status->rx_mini_consumer,
7917 tp->hw_status->idx[0].rx_producer,
7918 tp->hw_status->idx[0].tx_consumer);
7919
7920 /* SW statistics block */
7921 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7922 ((u32 *)tp->hw_stats)[0],
7923 ((u32 *)tp->hw_stats)[1],
7924 ((u32 *)tp->hw_stats)[2],
7925 ((u32 *)tp->hw_stats)[3]);
7926
7927 /* Mailboxes */
7928 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7929 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7930 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7931 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7932 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7933
7934 /* NIC side send descriptors. */
7935 for (i = 0; i < 6; i++) {
7936 unsigned long txd;
7937
7938 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7939 + (i * sizeof(struct tg3_tx_buffer_desc));
7940 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7941 i,
7942 readl(txd + 0x0), readl(txd + 0x4),
7943 readl(txd + 0x8), readl(txd + 0xc));
7944 }
7945
7946 /* NIC side RX descriptors. */
7947 for (i = 0; i < 6; i++) {
7948 unsigned long rxd;
7949
7950 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7951 + (i * sizeof(struct tg3_rx_buffer_desc));
7952 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7953 i,
7954 readl(rxd + 0x0), readl(rxd + 0x4),
7955 readl(rxd + 0x8), readl(rxd + 0xc));
7956 rxd += (4 * sizeof(u32));
7957 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7958 i,
7959 readl(rxd + 0x0), readl(rxd + 0x4),
7960 readl(rxd + 0x8), readl(rxd + 0xc));
7961 }
7962
7963 for (i = 0; i < 6; i++) {
7964 unsigned long rxd;
7965
7966 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7967 + (i * sizeof(struct tg3_rx_buffer_desc));
7968 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7969 i,
7970 readl(rxd + 0x0), readl(rxd + 0x4),
7971 readl(rxd + 0x8), readl(rxd + 0xc));
7972 rxd += (4 * sizeof(u32));
7973 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7974 i,
7975 readl(rxd + 0x0), readl(rxd + 0x4),
7976 readl(rxd + 0x8), readl(rxd + 0xc));
7977 }
7978 }
7979 #endif
7980
7981 static struct net_device_stats *tg3_get_stats(struct net_device *);
7982 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7983
7984 static int tg3_close(struct net_device *dev)
7985 {
7986 struct tg3 *tp = netdev_priv(dev);
7987
7988 napi_disable(&tp->napi);
7989 cancel_work_sync(&tp->reset_task);
7990
7991 netif_stop_queue(dev);
7992
7993 del_timer_sync(&tp->timer);
7994
7995 tg3_full_lock(tp, 1);
7996 #if 0
7997 tg3_dump_state(tp);
7998 #endif
7999
8000 tg3_disable_ints(tp);
8001
8002 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8003 tg3_free_rings(tp);
8004 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8005
8006 tg3_full_unlock(tp);
8007
8008 free_irq(tp->pdev->irq, dev);
8009 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8010 pci_disable_msi(tp->pdev);
8011 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8012 }
8013
8014 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
8015 sizeof(tp->net_stats_prev));
8016 memcpy(&tp->estats_prev, tg3_get_estats(tp),
8017 sizeof(tp->estats_prev));
8018
8019 tg3_free_consistent(tp);
8020
8021 tg3_set_power_state(tp, PCI_D3hot);
8022
8023 netif_carrier_off(tp->dev);
8024
8025 return 0;
8026 }
8027
8028 static inline unsigned long get_stat64(tg3_stat64_t *val)
8029 {
8030 unsigned long ret;
8031
8032 #if (BITS_PER_LONG == 32)
8033 ret = val->low;
8034 #else
8035 ret = ((u64)val->high << 32) | ((u64)val->low);
8036 #endif
8037 return ret;
8038 }
8039
8040 static unsigned long calc_crc_errors(struct tg3 *tp)
8041 {
8042 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8043
8044 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8045 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8046 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
8047 u32 val;
8048
8049 spin_lock_bh(&tp->lock);
8050 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
8051 tg3_writephy(tp, MII_TG3_TEST1,
8052 val | MII_TG3_TEST1_CRC_EN);
8053 tg3_readphy(tp, 0x14, &val);
8054 } else
8055 val = 0;
8056 spin_unlock_bh(&tp->lock);
8057
8058 tp->phy_crc_errors += val;
8059
8060 return tp->phy_crc_errors;
8061 }
8062
8063 return get_stat64(&hw_stats->rx_fcs_errors);
8064 }
8065
8066 #define ESTAT_ADD(member) \
8067 estats->member = old_estats->member + \
8068 get_stat64(&hw_stats->member)
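/* Editor's note: the hardware counters are zeroed by a chip reset;
 * estats_prev holds the snapshot taken in tg3_close(), so adding it
 * back here keeps the ethtool statistics monotonic across down/up
 * cycles.
 */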
8069
8070 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
8071 {
8072 struct tg3_ethtool_stats *estats = &tp->estats;
8073 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
8074 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8075
8076 if (!hw_stats)
8077 return old_estats;
8078
8079 ESTAT_ADD(rx_octets);
8080 ESTAT_ADD(rx_fragments);
8081 ESTAT_ADD(rx_ucast_packets);
8082 ESTAT_ADD(rx_mcast_packets);
8083 ESTAT_ADD(rx_bcast_packets);
8084 ESTAT_ADD(rx_fcs_errors);
8085 ESTAT_ADD(rx_align_errors);
8086 ESTAT_ADD(rx_xon_pause_rcvd);
8087 ESTAT_ADD(rx_xoff_pause_rcvd);
8088 ESTAT_ADD(rx_mac_ctrl_rcvd);
8089 ESTAT_ADD(rx_xoff_entered);
8090 ESTAT_ADD(rx_frame_too_long_errors);
8091 ESTAT_ADD(rx_jabbers);
8092 ESTAT_ADD(rx_undersize_packets);
8093 ESTAT_ADD(rx_in_length_errors);
8094 ESTAT_ADD(rx_out_length_errors);
8095 ESTAT_ADD(rx_64_or_less_octet_packets);
8096 ESTAT_ADD(rx_65_to_127_octet_packets);
8097 ESTAT_ADD(rx_128_to_255_octet_packets);
8098 ESTAT_ADD(rx_256_to_511_octet_packets);
8099 ESTAT_ADD(rx_512_to_1023_octet_packets);
8100 ESTAT_ADD(rx_1024_to_1522_octet_packets);
8101 ESTAT_ADD(rx_1523_to_2047_octet_packets);
8102 ESTAT_ADD(rx_2048_to_4095_octet_packets);
8103 ESTAT_ADD(rx_4096_to_8191_octet_packets);
8104 ESTAT_ADD(rx_8192_to_9022_octet_packets);
8105
8106 ESTAT_ADD(tx_octets);
8107 ESTAT_ADD(tx_collisions);
8108 ESTAT_ADD(tx_xon_sent);
8109 ESTAT_ADD(tx_xoff_sent);
8110 ESTAT_ADD(tx_flow_control);
8111 ESTAT_ADD(tx_mac_errors);
8112 ESTAT_ADD(tx_single_collisions);
8113 ESTAT_ADD(tx_mult_collisions);
8114 ESTAT_ADD(tx_deferred);
8115 ESTAT_ADD(tx_excessive_collisions);
8116 ESTAT_ADD(tx_late_collisions);
8117 ESTAT_ADD(tx_collide_2times);
8118 ESTAT_ADD(tx_collide_3times);
8119 ESTAT_ADD(tx_collide_4times);
8120 ESTAT_ADD(tx_collide_5times);
8121 ESTAT_ADD(tx_collide_6times);
8122 ESTAT_ADD(tx_collide_7times);
8123 ESTAT_ADD(tx_collide_8times);
8124 ESTAT_ADD(tx_collide_9times);
8125 ESTAT_ADD(tx_collide_10times);
8126 ESTAT_ADD(tx_collide_11times);
8127 ESTAT_ADD(tx_collide_12times);
8128 ESTAT_ADD(tx_collide_13times);
8129 ESTAT_ADD(tx_collide_14times);
8130 ESTAT_ADD(tx_collide_15times);
8131 ESTAT_ADD(tx_ucast_packets);
8132 ESTAT_ADD(tx_mcast_packets);
8133 ESTAT_ADD(tx_bcast_packets);
8134 ESTAT_ADD(tx_carrier_sense_errors);
8135 ESTAT_ADD(tx_discards);
8136 ESTAT_ADD(tx_errors);
8137
8138 ESTAT_ADD(dma_writeq_full);
8139 ESTAT_ADD(dma_write_prioq_full);
8140 ESTAT_ADD(rxbds_empty);
8141 ESTAT_ADD(rx_discards);
8142 ESTAT_ADD(rx_errors);
8143 ESTAT_ADD(rx_threshold_hit);
8144
8145 ESTAT_ADD(dma_readq_full);
8146 ESTAT_ADD(dma_read_prioq_full);
8147 ESTAT_ADD(tx_comp_queue_full);
8148
8149 ESTAT_ADD(ring_set_send_prod_index);
8150 ESTAT_ADD(ring_status_update);
8151 ESTAT_ADD(nic_irqs);
8152 ESTAT_ADD(nic_avoided_irqs);
8153 ESTAT_ADD(nic_tx_threshold_hit);
8154
8155 return estats;
8156 }
8157
8158 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
8159 {
8160 struct tg3 *tp = netdev_priv(dev);
8161 struct net_device_stats *stats = &tp->net_stats;
8162 struct net_device_stats *old_stats = &tp->net_stats_prev;
8163 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8164
8165 if (!hw_stats)
8166 return old_stats;
8167
8168 stats->rx_packets = old_stats->rx_packets +
8169 get_stat64(&hw_stats->rx_ucast_packets) +
8170 get_stat64(&hw_stats->rx_mcast_packets) +
8171 get_stat64(&hw_stats->rx_bcast_packets);
8172
8173 stats->tx_packets = old_stats->tx_packets +
8174 get_stat64(&hw_stats->tx_ucast_packets) +
8175 get_stat64(&hw_stats->tx_mcast_packets) +
8176 get_stat64(&hw_stats->tx_bcast_packets);
8177
8178 stats->rx_bytes = old_stats->rx_bytes +
8179 get_stat64(&hw_stats->rx_octets);
8180 stats->tx_bytes = old_stats->tx_bytes +
8181 get_stat64(&hw_stats->tx_octets);
8182
8183 stats->rx_errors = old_stats->rx_errors +
8184 get_stat64(&hw_stats->rx_errors);
8185 stats->tx_errors = old_stats->tx_errors +
8186 get_stat64(&hw_stats->tx_errors) +
8187 get_stat64(&hw_stats->tx_mac_errors) +
8188 get_stat64(&hw_stats->tx_carrier_sense_errors) +
8189 get_stat64(&hw_stats->tx_discards);
8190
8191 stats->multicast = old_stats->multicast +
8192 get_stat64(&hw_stats->rx_mcast_packets);
8193 stats->collisions = old_stats->collisions +
8194 get_stat64(&hw_stats->tx_collisions);
8195
8196 stats->rx_length_errors = old_stats->rx_length_errors +
8197 get_stat64(&hw_stats->rx_frame_too_long_errors) +
8198 get_stat64(&hw_stats->rx_undersize_packets);
8199
8200 stats->rx_over_errors = old_stats->rx_over_errors +
8201 get_stat64(&hw_stats->rxbds_empty);
8202 stats->rx_frame_errors = old_stats->rx_frame_errors +
8203 get_stat64(&hw_stats->rx_align_errors);
8204 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
8205 get_stat64(&hw_stats->tx_discards);
8206 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
8207 get_stat64(&hw_stats->tx_carrier_sense_errors);
8208
8209 stats->rx_crc_errors = old_stats->rx_crc_errors +
8210 calc_crc_errors(tp);
8211
8212 stats->rx_missed_errors = old_stats->rx_missed_errors +
8213 get_stat64(&hw_stats->rx_discards);
8214
8215 return stats;
8216 }
8217
8218 static inline u32 calc_crc(unsigned char *buf, int len)
8219 {
8220 u32 reg;
8221 u32 tmp;
8222 int j, k;
8223
8224 reg = 0xffffffff;
8225
8226 for (j = 0; j < len; j++) {
8227 reg ^= buf[j];
8228
8229 for (k = 0; k < 8; k++) {
8230 tmp = reg & 0x01;
8231
8232 reg >>= 1;
8233
8234 if (tmp) {
8235 reg ^= 0xedb88320;
8236 }
8237 }
8238 }
8239
8240 return ~reg;
8241 }
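/* Editor's note: the loop above is the bit-serial, reflected CRC-32
 * used by Ethernet (polynomial 0xedb88320, all-ones init, final
 * inversion). Assuming the generic helpers in lib/crc32.c, where
 * ether_crc_le() is crc32_le(~0, data, len) with no final inversion,
 * a hypothetical equivalent would be:
 */
#if 0	/* illustration only, not built */
#include <linux/crc32.h>

static inline u32 calc_crc_alt(unsigned char *buf, int len)
{
	return ~ether_crc_le(len, buf);
}
#endif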
8242
8243 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8244 {
8245 /* accept or reject all multicast frames */
8246 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8247 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8248 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8249 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8250 }
8251
8252 static void __tg3_set_rx_mode(struct net_device *dev)
8253 {
8254 struct tg3 *tp = netdev_priv(dev);
8255 u32 rx_mode;
8256
8257 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8258 RX_MODE_KEEP_VLAN_TAG);
8259
8260 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8261 * flag clear.
8262 */
8263 #if TG3_VLAN_TAG_USED
8264 if (!tp->vlgrp &&
8265 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8266 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8267 #else
8268 /* By definition, VLAN is disabled always in this
8269 * case.
8270 */
8271 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8272 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8273 #endif
8274
8275 if (dev->flags & IFF_PROMISC) {
8276 /* Promiscuous mode. */
8277 rx_mode |= RX_MODE_PROMISC;
8278 } else if (dev->flags & IFF_ALLMULTI) {
8279 /* Accept all multicast. */
8280 tg3_set_multi(tp, 1);
8281 } else if (dev->mc_count < 1) {
8282 /* Reject all multicast. */
8283 tg3_set_multi(tp, 0);
8284 } else {
8285 /* Accept one or more multicast(s). */
8286 struct dev_mc_list *mclist;
8287 unsigned int i;
8288 u32 mc_filter[4] = { 0, };
8289 u32 regidx;
8290 u32 bit;
8291 u32 crc;
8292
8293 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8294 i++, mclist = mclist->next) {
8295
8296 crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
8297 bit = ~crc & 0x7f;
8298 regidx = (bit & 0x60) >> 5;
8299 bit &= 0x1f;
8300 mc_filter[regidx] |= (1 << bit);
8301 }
8302
8303 tw32(MAC_HASH_REG_0, mc_filter[0]);
8304 tw32(MAC_HASH_REG_1, mc_filter[1]);
8305 tw32(MAC_HASH_REG_2, mc_filter[2]);
8306 tw32(MAC_HASH_REG_3, mc_filter[3]);
8307 }
8308
8309 if (rx_mode != tp->rx_mode) {
8310 tp->rx_mode = rx_mode;
8311 tw32_f(MAC_RX_MODE, rx_mode);
8312 udelay(10);
8313 }
8314 }
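/* Editor's note: a minimal sketch of the hash indexing performed in the
 * multicast loop above, factored into a hypothetical helper (not part
 * of the driver). The low 7 bits of the inverted CRC pick one bit in a
 * 128-bit filter spread over the four MAC_HASH_REG_* registers; e.g.
 * ~crc & 0x7f == 0x6a selects bit 10 of MAC_HASH_REG_3.
 */
#if 0	/* illustration only, not built */
static void tg3_mc_hash_bit(u32 crc, u32 *regidx, u32 *bit)
{
	u32 b = ~crc & 0x7f;		/* low 7 bits of inverted CRC */

	*regidx = (b & 0x60) >> 5;	/* which of the 4 hash registers */
	*bit = b & 0x1f;		/* which bit within that register */
}
#endif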
8315
8316 static void tg3_set_rx_mode(struct net_device *dev)
8317 {
8318 struct tg3 *tp = netdev_priv(dev);
8319
8320 if (!netif_running(dev))
8321 return;
8322
8323 tg3_full_lock(tp, 0);
8324 __tg3_set_rx_mode(dev);
8325 tg3_full_unlock(tp);
8326 }
8327
8328 #define TG3_REGDUMP_LEN (32 * 1024)
8329
8330 static int tg3_get_regs_len(struct net_device *dev)
8331 {
8332 return TG3_REGDUMP_LEN;
8333 }
8334
8335 static void tg3_get_regs(struct net_device *dev,
8336 struct ethtool_regs *regs, void *_p)
8337 {
8338 u32 *p = _p;
8339 struct tg3 *tp = netdev_priv(dev);
8340 u8 *orig_p = _p;
8341 int i;
8342
8343 regs->version = 0;
8344
8345 memset(p, 0, TG3_REGDUMP_LEN);
8346
8347 if (tp->link_config.phy_is_low_power)
8348 return;
8349
8350 tg3_full_lock(tp, 0);
8351
8352 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
8353 #define GET_REG32_LOOP(base,len) \
8354 do { p = (u32 *)(orig_p + (base)); \
8355 for (i = 0; i < len; i += 4) \
8356 __GET_REG32((base) + i); \
8357 } while (0)
8358 #define GET_REG32_1(reg) \
8359 do { p = (u32 *)(orig_p + (reg)); \
8360 __GET_REG32((reg)); \
8361 } while (0)
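/* Editor's note: each helper first seeks the output cursor to the
 * register's own offset within the buffer, so the 32 kB dump mirrors
 * the register map 1:1 and any ranges never read stay zero-filled
 * from the memset above.
 */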
8362
8363 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8364 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8365 GET_REG32_LOOP(MAC_MODE, 0x4f0);
8366 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8367 GET_REG32_1(SNDDATAC_MODE);
8368 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8369 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8370 GET_REG32_1(SNDBDC_MODE);
8371 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8372 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8373 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8374 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8375 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8376 GET_REG32_1(RCVDCC_MODE);
8377 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8378 GET_REG32_LOOP(RCVCC_MODE, 0x14);
8379 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8380 GET_REG32_1(MBFREE_MODE);
8381 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8382 GET_REG32_LOOP(MEMARB_MODE, 0x10);
8383 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8384 GET_REG32_LOOP(RDMAC_MODE, 0x08);
8385 GET_REG32_LOOP(WDMAC_MODE, 0x08);
8386 GET_REG32_1(RX_CPU_MODE);
8387 GET_REG32_1(RX_CPU_STATE);
8388 GET_REG32_1(RX_CPU_PGMCTR);
8389 GET_REG32_1(RX_CPU_HWBKPT);
8390 GET_REG32_1(TX_CPU_MODE);
8391 GET_REG32_1(TX_CPU_STATE);
8392 GET_REG32_1(TX_CPU_PGMCTR);
8393 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8394 GET_REG32_LOOP(FTQ_RESET, 0x120);
8395 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8396 GET_REG32_1(DMAC_MODE);
8397 GET_REG32_LOOP(GRC_MODE, 0x4c);
8398 if (tp->tg3_flags & TG3_FLAG_NVRAM)
8399 GET_REG32_LOOP(NVRAM_CMD, 0x24);
8400
8401 #undef __GET_REG32
8402 #undef GET_REG32_LOOP
8403 #undef GET_REG32_1
8404
8405 tg3_full_unlock(tp);
8406 }
8407
8408 static int tg3_get_eeprom_len(struct net_device *dev)
8409 {
8410 struct tg3 *tp = netdev_priv(dev);
8411
8412 return tp->nvram_size;
8413 }
8414
8415 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8416 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
8417 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8418
8419 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8420 {
8421 struct tg3 *tp = netdev_priv(dev);
8422 int ret;
8423 u8 *pd;
8424 u32 i, offset, len, b_offset, b_count;
8425 __le32 val;
8426
8427 if (tp->link_config.phy_is_low_power)
8428 return -EAGAIN;
8429
8430 offset = eeprom->offset;
8431 len = eeprom->len;
8432 eeprom->len = 0;
8433
8434 eeprom->magic = TG3_EEPROM_MAGIC;
8435
8436 if (offset & 3) {
8437 /* adjustments to start on the required 4-byte boundary */
8438 b_offset = offset & 3;
8439 b_count = 4 - b_offset;
8440 if (b_count > len) {
8441 /* i.e. offset=1 len=2 */
8442 b_count = len;
8443 }
8444 ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
8445 if (ret)
8446 return ret;
8447 memcpy(data, ((char*)&val) + b_offset, b_count);
8448 len -= b_count;
8449 offset += b_count;
8450 eeprom->len += b_count;
8451 }
8452
8453 /* read bytes up to the last 4-byte boundary */
8454 pd = &data[eeprom->len];
8455 for (i = 0; i < (len - (len & 3)); i += 4) {
8456 ret = tg3_nvram_read_le(tp, offset + i, &val);
8457 if (ret) {
8458 eeprom->len += i;
8459 return ret;
8460 }
8461 memcpy(pd + i, &val, 4);
8462 }
8463 eeprom->len += i;
8464
8465 if (len & 3) {
8466 /* read last bytes not ending on 4 byte boundary */
8467 pd = &data[eeprom->len];
8468 b_count = len & 3;
8469 b_offset = offset + len - b_count;
8470 ret = tg3_nvram_read_le(tp, b_offset, &val);
8471 if (ret)
8472 return ret;
8473 memcpy(pd, &val, b_count);
8474 eeprom->len += b_count;
8475 }
8476 return 0;
8477 }
8478
8479 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8480
8481 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8482 {
8483 struct tg3 *tp = netdev_priv(dev);
8484 int ret;
8485 u32 offset, len, b_offset, odd_len;
8486 u8 *buf;
8487 __le32 start, end;
8488
8489 if (tp->link_config.phy_is_low_power)
8490 return -EAGAIN;
8491
8492 if (eeprom->magic != TG3_EEPROM_MAGIC)
8493 return -EINVAL;
8494
8495 offset = eeprom->offset;
8496 len = eeprom->len;
8497
8498 if ((b_offset = (offset & 3))) {
8499 /* adjustments to start on the required 4-byte boundary */
8500 ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
8501 if (ret)
8502 return ret;
8503 len += b_offset;
8504 offset &= ~3;
8505 if (len < 4)
8506 len = 4;
8507 }
8508
8509 odd_len = 0;
8510 if (len & 3) {
8511 /* adjustments to end on the required 4-byte boundary */
8512 odd_len = 1;
8513 len = (len + 3) & ~3;
8514 ret = tg3_nvram_read_le(tp, offset+len-4, &end);
8515 if (ret)
8516 return ret;
8517 }
8518
8519 buf = data;
8520 if (b_offset || odd_len) {
8521 buf = kmalloc(len, GFP_KERNEL);
8522 if (!buf)
8523 return -ENOMEM;
8524 if (b_offset)
8525 memcpy(buf, &start, 4);
8526 if (odd_len)
8527 memcpy(buf+len-4, &end, 4);
8528 memcpy(buf + b_offset, data, eeprom->len);
8529 }
8530
8531 ret = tg3_nvram_write_block(tp, offset, len, buf);
8532
8533 if (buf != data)
8534 kfree(buf);
8535
8536 return ret;
8537 }
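/* Editor's note, worked example of the read-modify-write above: a
 * hypothetical 3-byte write at offset 2 reads the old words at offsets
 * 0 and 4, widens the span to 8 aligned bytes, copies both words into
 * a bounce buffer, overlays bytes 2-4 with the caller's data, and then
 * programs the whole aligned block.
 */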
8538
8539 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8540 {
8541 struct tg3 *tp = netdev_priv(dev);
8542
8543 cmd->supported = (SUPPORTED_Autoneg);
8544
8545 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8546 cmd->supported |= (SUPPORTED_1000baseT_Half |
8547 SUPPORTED_1000baseT_Full);
8548
8549 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
8550 cmd->supported |= (SUPPORTED_100baseT_Half |
8551 SUPPORTED_100baseT_Full |
8552 SUPPORTED_10baseT_Half |
8553 SUPPORTED_10baseT_Full |
8554 SUPPORTED_TP);
8555 cmd->port = PORT_TP;
8556 } else {
8557 cmd->supported |= SUPPORTED_FIBRE;
8558 cmd->port = PORT_FIBRE;
8559 }
8560
8561 cmd->advertising = tp->link_config.advertising;
8562 if (netif_running(dev)) {
8563 cmd->speed = tp->link_config.active_speed;
8564 cmd->duplex = tp->link_config.active_duplex;
8565 }
8566 cmd->phy_address = PHY_ADDR;
8567 cmd->transceiver = 0;
8568 cmd->autoneg = tp->link_config.autoneg;
8569 cmd->maxtxpkt = 0;
8570 cmd->maxrxpkt = 0;
8571 return 0;
8572 }
8573
8574 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8575 {
8576 struct tg3 *tp = netdev_priv(dev);
8577
8578 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
8579 /* These are the only valid advertisement bits allowed. */
8580 if (cmd->autoneg == AUTONEG_ENABLE &&
8581 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8582 ADVERTISED_1000baseT_Full |
8583 ADVERTISED_Autoneg |
8584 ADVERTISED_FIBRE)))
8585 return -EINVAL;
8586 /* Fiber can only do SPEED_1000. */
8587 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8588 (cmd->speed != SPEED_1000))
8589 return -EINVAL;
8590 /* Copper cannot force SPEED_1000. */
8591 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8592 (cmd->speed == SPEED_1000))
8593 return -EINVAL;
8594 else if ((cmd->speed == SPEED_1000) &&
8595 (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8596 return -EINVAL;
8597
8598 tg3_full_lock(tp, 0);
8599
8600 tp->link_config.autoneg = cmd->autoneg;
8601 if (cmd->autoneg == AUTONEG_ENABLE) {
8602 tp->link_config.advertising = (cmd->advertising |
8603 ADVERTISED_Autoneg);
8604 tp->link_config.speed = SPEED_INVALID;
8605 tp->link_config.duplex = DUPLEX_INVALID;
8606 } else {
8607 tp->link_config.advertising = 0;
8608 tp->link_config.speed = cmd->speed;
8609 tp->link_config.duplex = cmd->duplex;
8610 }
8611
8612 tp->link_config.orig_speed = tp->link_config.speed;
8613 tp->link_config.orig_duplex = tp->link_config.duplex;
8614 tp->link_config.orig_autoneg = tp->link_config.autoneg;
8615
8616 if (netif_running(dev))
8617 tg3_setup_phy(tp, 1);
8618
8619 tg3_full_unlock(tp);
8620
8621 return 0;
8622 }
8623
8624 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8625 {
8626 struct tg3 *tp = netdev_priv(dev);
8627
8628 strcpy(info->driver, DRV_MODULE_NAME);
8629 strcpy(info->version, DRV_MODULE_VERSION);
8630 strcpy(info->fw_version, tp->fw_ver);
8631 strcpy(info->bus_info, pci_name(tp->pdev));
8632 }
8633
8634 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8635 {
8636 struct tg3 *tp = netdev_priv(dev);
8637
8638 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8639 wol->supported = WAKE_MAGIC;
8640 else
8641 wol->supported = 0;
8642 wol->wolopts = 0;
8643 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8644 wol->wolopts = WAKE_MAGIC;
8645 memset(&wol->sopass, 0, sizeof(wol->sopass));
8646 }
8647
8648 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8649 {
8650 struct tg3 *tp = netdev_priv(dev);
8651
8652 if (wol->wolopts & ~WAKE_MAGIC)
8653 return -EINVAL;
8654 if ((wol->wolopts & WAKE_MAGIC) &&
8655 !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
8656 return -EINVAL;
8657
8658 spin_lock_bh(&tp->lock);
8659 if (wol->wolopts & WAKE_MAGIC)
8660 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8661 else
8662 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8663 spin_unlock_bh(&tp->lock);
8664
8665 return 0;
8666 }
8667
8668 static u32 tg3_get_msglevel(struct net_device *dev)
8669 {
8670 struct tg3 *tp = netdev_priv(dev);
8671 return tp->msg_enable;
8672 }
8673
8674 static void tg3_set_msglevel(struct net_device *dev, u32 value)
8675 {
8676 struct tg3 *tp = netdev_priv(dev);
8677 tp->msg_enable = value;
8678 }
8679
8680 static int tg3_set_tso(struct net_device *dev, u32 value)
8681 {
8682 struct tg3 *tp = netdev_priv(dev);
8683
8684 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8685 if (value)
8686 return -EINVAL;
8687 return 0;
8688 }
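/* On HW_TSO_2 chips other than the 5906, the IPv6 TSO flag tracks the
 * TSO setting; the 5761 additionally gets TSO with ECN.
 */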
8689 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
8690 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
8691 if (value) {
8692 dev->features |= NETIF_F_TSO6;
8693 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8694 dev->features |= NETIF_F_TSO_ECN;
8695 } else
8696 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
8697 }
8698 return ethtool_op_set_tso(dev, value);
8699 }
8700
8701 static int tg3_nway_reset(struct net_device *dev)
8702 {
8703 struct tg3 *tp = netdev_priv(dev);
8704 u32 bmcr;
8705 int r;
8706
8707 if (!netif_running(dev))
8708 return -EAGAIN;
8709
8710 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8711 return -EINVAL;
8712
8713 spin_lock_bh(&tp->lock);
8714 r = -EINVAL;
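/* Read BMCR twice; the first result is discarded so that only a
 * fresh value is tested.
 */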
8715 tg3_readphy(tp, MII_BMCR, &bmcr);
8716 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
8717 ((bmcr & BMCR_ANENABLE) ||
8718 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
8719 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
8720 BMCR_ANENABLE);
8721 r = 0;
8722 }
8723 spin_unlock_bh(&tp->lock);
8724
8725 return r;
8726 }
8727
8728 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8729 {
8730 struct tg3 *tp = netdev_priv(dev);
8731
8732 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8733 ering->rx_mini_max_pending = 0;
8734 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8735 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8736 else
8737 ering->rx_jumbo_max_pending = 0;
8738
8739 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8740
8741 ering->rx_pending = tp->rx_pending;
8742 ering->rx_mini_pending = 0;
8743 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8744 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8745 else
8746 ering->rx_jumbo_pending = 0;
8747
8748 ering->tx_pending = tp->tx_pending;
8749 }
8750
8751 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8752 {
8753 struct tg3 *tp = netdev_priv(dev);
8754 int irq_sync = 0, err = 0;
8755
8756 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
8757 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
8758 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
8759 (ering->tx_pending <= MAX_SKB_FRAGS) ||
8760 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
8761 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
8762 return -EINVAL;
8763
8764 if (netif_running(dev)) {
8765 tg3_netif_stop(tp);
8766 irq_sync = 1;
8767 }
8768
8769 tg3_full_lock(tp, irq_sync);
8770
8771 tp->rx_pending = ering->rx_pending;
8772
8773 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
8774 tp->rx_pending > 63)
8775 tp->rx_pending = 63;
8776 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
8777 tp->tx_pending = ering->tx_pending;
8778
8779 if (netif_running(dev)) {
8780 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8781 err = tg3_restart_hw(tp, 1);
8782 if (!err)
8783 tg3_netif_start(tp);
8784 }
8785
8786 tg3_full_unlock(tp);
8787
8788 return err;
8789 }
8790
8791 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8792 {
8793 struct tg3 *tp = netdev_priv(dev);
8794
8795 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8796
8797 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
8798 epause->rx_pause = 1;
8799 else
8800 epause->rx_pause = 0;
8801
8802 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
8803 epause->tx_pause = 1;
8804 else
8805 epause->tx_pause = 0;
8806 }
8807
8808 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8809 {
8810 struct tg3 *tp = netdev_priv(dev);
8811 int irq_sync = 0, err = 0;
8812
8813 if (netif_running(dev)) {
8814 tg3_netif_stop(tp);
8815 irq_sync = 1;
8816 }
8817
8818 tg3_full_lock(tp, irq_sync);
8819
8820 if (epause->autoneg)
8821 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8822 else
8823 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8824 if (epause->rx_pause)
8825 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
8826 else
8827 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
8828 if (epause->tx_pause)
8829 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
8830 else
8831 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
8832
8833 if (netif_running(dev)) {
8834 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8835 err = tg3_restart_hw(tp, 1);
8836 if (!err)
8837 tg3_netif_start(tp);
8838 }
8839
8840 tg3_full_unlock(tp);
8841
8842 return err;
8843 }
8844
8845 static u32 tg3_get_rx_csum(struct net_device *dev)
8846 {
8847 struct tg3 *tp = netdev_priv(dev);
8848 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8849 }
8850
8851 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8852 {
8853 struct tg3 *tp = netdev_priv(dev);
8854
8855 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8856 if (data != 0)
8857 return -EINVAL;
8858 return 0;
8859 }
8860
8861 spin_lock_bh(&tp->lock);
8862 if (data)
8863 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8864 else
8865 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8866 spin_unlock_bh(&tp->lock);
8867
8868 return 0;
8869 }
8870
8871 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8872 {
8873 struct tg3 *tp = netdev_priv(dev);
8874
8875 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8876 if (data != 0)
8877 return -EINVAL;
8878 return 0;
8879 }
8880
8881 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8882 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
8883 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8884 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8885 ethtool_op_set_tx_ipv6_csum(dev, data);
8886 else
8887 ethtool_op_set_tx_csum(dev, data);
8888
8889 return 0;
8890 }
8891
8892 static int tg3_get_sset_count (struct net_device *dev, int sset)
8893 {
8894 switch (sset) {
8895 case ETH_SS_TEST:
8896 return TG3_NUM_TEST;
8897 case ETH_SS_STATS:
8898 return TG3_NUM_STATS;
8899 default:
8900 return -EOPNOTSUPP;
8901 }
8902 }
8903
8904 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8905 {
8906 switch (stringset) {
8907 case ETH_SS_STATS:
8908 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8909 break;
8910 case ETH_SS_TEST:
8911 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8912 break;
8913 default:
8914 WARN_ON(1); /* we need a WARN() */
8915 break;
8916 }
8917 }
8918
8919 static int tg3_phys_id(struct net_device *dev, u32 data)
8920 {
8921 struct tg3 *tp = netdev_priv(dev);
8922 int i;
8923
8924 if (!netif_running(tp->dev))
8925 return -EAGAIN;
8926
8927 if (data == 0)
8928 data = UINT_MAX / 2;
8929
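/* Blink for roughly 'data' seconds: each iteration toggles the LED
 * override and sleeps 500ms, so two iterations per second.
 */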
8930 for (i = 0; i < (data * 2); i++) {
8931 if ((i % 2) == 0)
8932 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8933 LED_CTRL_1000MBPS_ON |
8934 LED_CTRL_100MBPS_ON |
8935 LED_CTRL_10MBPS_ON |
8936 LED_CTRL_TRAFFIC_OVERRIDE |
8937 LED_CTRL_TRAFFIC_BLINK |
8938 LED_CTRL_TRAFFIC_LED);
8939
8940 else
8941 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8942 LED_CTRL_TRAFFIC_OVERRIDE);
8943
8944 if (msleep_interruptible(500))
8945 break;
8946 }
8947 tw32(MAC_LED_CTRL, tp->led_ctrl);
8948 return 0;
8949 }
8950
8951 static void tg3_get_ethtool_stats (struct net_device *dev,
8952 struct ethtool_stats *estats, u64 *tmp_stats)
8953 {
8954 struct tg3 *tp = netdev_priv(dev);
8955 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8956 }
8957
8958 #define NVRAM_TEST_SIZE 0x100
8959 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
8960 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
8961 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
8962 #define NVRAM_SELFBOOT_HW_SIZE 0x20
8963 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
8964
8965 static int tg3_test_nvram(struct tg3 *tp)
8966 {
8967 u32 csum, magic;
8968 __le32 *buf;
8969 int i, j, k, err = 0, size;
8970
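/* The magic word at offset 0 identifies the image format, which
 * determines how much to read and which checksum scheme applies.
 */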
8971 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8972 return -EIO;
8973
8974 if (magic == TG3_EEPROM_MAGIC)
8975 size = NVRAM_TEST_SIZE;
8976 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
8977 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
8978 TG3_EEPROM_SB_FORMAT_1) {
8979 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
8980 case TG3_EEPROM_SB_REVISION_0:
8981 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
8982 break;
8983 case TG3_EEPROM_SB_REVISION_2:
8984 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
8985 break;
8986 case TG3_EEPROM_SB_REVISION_3:
8987 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
8988 break;
8989 default:
8990 return 0;
8991 }
8992 } else
8993 return 0;
8994 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
8995 size = NVRAM_SELFBOOT_HW_SIZE;
8996 else
8997 return -EIO;
8998
8999 buf = kmalloc(size, GFP_KERNEL);
9000 if (buf == NULL)
9001 return -ENOMEM;
9002
9003 err = -EIO;
9004 for (i = 0, j = 0; i < size; i += 4, j++) {
9005 if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
9006 break;
9007 }
9008 if (i < size)
9009 goto out;
9010
9011 /* Selfboot format */
9012 magic = swab32(le32_to_cpu(buf[0]));
9013 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
9014 TG3_EEPROM_MAGIC_FW) {
9015 u8 *buf8 = (u8 *) buf, csum8 = 0;
9016
9017 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
9018 TG3_EEPROM_SB_REVISION_2) {
9019 /* For rev 2, the csum doesn't include the MBA. */
9020 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
9021 csum8 += buf8[i];
9022 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
9023 csum8 += buf8[i];
9024 } else {
9025 for (i = 0; i < size; i++)
9026 csum8 += buf8[i];
9027 }
9028
9029 if (csum8 == 0) {
9030 err = 0;
9031 goto out;
9032 }
9033
9034 err = -EIO;
9035 goto out;
9036 }
9037
9038 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
9039 TG3_EEPROM_MAGIC_HW) {
9040 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
9041 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
9042 u8 *buf8 = (u8 *) buf;
9043
9044 /* Separate the parity bits and the data bytes. */
9045 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
9046 if ((i == 0) || (i == 8)) {
9047 int l;
9048 u8 msk;
9049
9050 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
9051 parity[k++] = buf8[i] & msk;
9052 i++;
9053 }
9054 else if (i == 16) {
9055 int l;
9056 u8 msk;
9057
9058 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
9059 parity[k++] = buf8[i] & msk;
9060 i++;
9061
9062 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
9063 parity[k++] = buf8[i] & msk;
9064 i++;
9065 }
9066 data[j++] = buf8[i];
9067 }
9068
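/* Each data byte together with its parity bit must have odd parity. */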
9069 err = -EIO;
9070 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9071 u8 hw8 = hweight8(data[i]);
9072
9073 if ((hw8 & 0x1) && parity[i])
9074 goto out;
9075 else if (!(hw8 & 0x1) && !parity[i])
9076 goto out;
9077 }
9078 err = 0;
9079 goto out;
9080 }
9081
9082 /* Bootstrap checksum at offset 0x10 */
9083 csum = calc_crc((unsigned char *) buf, 0x10);
9084 if (csum != le32_to_cpu(buf[0x10/4]))
9085 goto out;
9086
9087 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
9088 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
9089 if (csum != le32_to_cpu(buf[0xfc/4]))
9090 goto out;
9091
9092 err = 0;
9093
9094 out:
9095 kfree(buf);
9096 return err;
9097 }
9098
9099 #define TG3_SERDES_TIMEOUT_SEC 2
9100 #define TG3_COPPER_TIMEOUT_SEC 6
9101
9102 static int tg3_test_link(struct tg3 *tp)
9103 {
9104 int i, max;
9105
9106 if (!netif_running(tp->dev))
9107 return -ENODEV;
9108
9109 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9110 max = TG3_SERDES_TIMEOUT_SEC;
9111 else
9112 max = TG3_COPPER_TIMEOUT_SEC;
9113
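/* Poll for link-up once a second, up to the PHY-type timeout above. */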
9114 for (i = 0; i < max; i++) {
9115 if (netif_carrier_ok(tp->dev))
9116 return 0;
9117
9118 if (msleep_interruptible(1000))
9119 break;
9120 }
9121
9122 return -EIO;
9123 }
9124
9125 /* Only test the commonly used registers */
9126 static int tg3_test_registers(struct tg3 *tp)
9127 {
9128 int i, is_5705, is_5750;
9129 u32 offset, read_mask, write_mask, val, save_val, read_val;
9130 static struct {
9131 u16 offset;
9132 u16 flags;
9133 #define TG3_FL_5705 0x1
9134 #define TG3_FL_NOT_5705 0x2
9135 #define TG3_FL_NOT_5788 0x4
9136 #define TG3_FL_NOT_5750 0x8
9137 u32 read_mask;
9138 u32 write_mask;
9139 } reg_tbl[] = {
9140 /* MAC Control Registers */
9141 { MAC_MODE, TG3_FL_NOT_5705,
9142 0x00000000, 0x00ef6f8c },
9143 { MAC_MODE, TG3_FL_5705,
9144 0x00000000, 0x01ef6b8c },
9145 { MAC_STATUS, TG3_FL_NOT_5705,
9146 0x03800107, 0x00000000 },
9147 { MAC_STATUS, TG3_FL_5705,
9148 0x03800100, 0x00000000 },
9149 { MAC_ADDR_0_HIGH, 0x0000,
9150 0x00000000, 0x0000ffff },
9151 { MAC_ADDR_0_LOW, 0x0000,
9152 0x00000000, 0xffffffff },
9153 { MAC_RX_MTU_SIZE, 0x0000,
9154 0x00000000, 0x0000ffff },
9155 { MAC_TX_MODE, 0x0000,
9156 0x00000000, 0x00000070 },
9157 { MAC_TX_LENGTHS, 0x0000,
9158 0x00000000, 0x00003fff },
9159 { MAC_RX_MODE, TG3_FL_NOT_5705,
9160 0x00000000, 0x000007fc },
9161 { MAC_RX_MODE, TG3_FL_5705,
9162 0x00000000, 0x000007dc },
9163 { MAC_HASH_REG_0, 0x0000,
9164 0x00000000, 0xffffffff },
9165 { MAC_HASH_REG_1, 0x0000,
9166 0x00000000, 0xffffffff },
9167 { MAC_HASH_REG_2, 0x0000,
9168 0x00000000, 0xffffffff },
9169 { MAC_HASH_REG_3, 0x0000,
9170 0x00000000, 0xffffffff },
9171
9172 /* Receive Data and Receive BD Initiator Control Registers. */
9173 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
9174 0x00000000, 0xffffffff },
9175 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
9176 0x00000000, 0xffffffff },
9177 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
9178 0x00000000, 0x00000003 },
9179 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
9180 0x00000000, 0xffffffff },
9181 { RCVDBDI_STD_BD+0, 0x0000,
9182 0x00000000, 0xffffffff },
9183 { RCVDBDI_STD_BD+4, 0x0000,
9184 0x00000000, 0xffffffff },
9185 { RCVDBDI_STD_BD+8, 0x0000,
9186 0x00000000, 0xffff0002 },
9187 { RCVDBDI_STD_BD+0xc, 0x0000,
9188 0x00000000, 0xffffffff },
9189
9190 /* Receive BD Initiator Control Registers. */
9191 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
9192 0x00000000, 0xffffffff },
9193 { RCVBDI_STD_THRESH, TG3_FL_5705,
9194 0x00000000, 0x000003ff },
9195 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
9196 0x00000000, 0xffffffff },
9197
9198 /* Host Coalescing Control Registers. */
9199 { HOSTCC_MODE, TG3_FL_NOT_5705,
9200 0x00000000, 0x00000004 },
9201 { HOSTCC_MODE, TG3_FL_5705,
9202 0x00000000, 0x000000f6 },
9203 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
9204 0x00000000, 0xffffffff },
9205 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
9206 0x00000000, 0x000003ff },
9207 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
9208 0x00000000, 0xffffffff },
9209 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
9210 0x00000000, 0x000003ff },
9211 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
9212 0x00000000, 0xffffffff },
9213 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9214 0x00000000, 0x000000ff },
9215 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
9216 0x00000000, 0xffffffff },
9217 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9218 0x00000000, 0x000000ff },
9219 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
9220 0x00000000, 0xffffffff },
9221 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
9222 0x00000000, 0xffffffff },
9223 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9224 0x00000000, 0xffffffff },
9225 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9226 0x00000000, 0x000000ff },
9227 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9228 0x00000000, 0xffffffff },
9229 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9230 0x00000000, 0x000000ff },
9231 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9232 0x00000000, 0xffffffff },
9233 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9234 0x00000000, 0xffffffff },
9235 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9236 0x00000000, 0xffffffff },
9237 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9238 0x00000000, 0xffffffff },
9239 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9240 0x00000000, 0xffffffff },
9241 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9242 0xffffffff, 0x00000000 },
9243 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9244 0xffffffff, 0x00000000 },
9245
9246 /* Buffer Manager Control Registers. */
9247 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
9248 0x00000000, 0x007fff80 },
9249 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
9250 0x00000000, 0x007fffff },
9251 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9252 0x00000000, 0x0000003f },
9253 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9254 0x00000000, 0x000001ff },
9255 { BUFMGR_MB_HIGH_WATER, 0x0000,
9256 0x00000000, 0x000001ff },
9257 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9258 0xffffffff, 0x00000000 },
9259 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9260 0xffffffff, 0x00000000 },
9261
9262 /* Mailbox Registers */
9263 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9264 0x00000000, 0x000001ff },
9265 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9266 0x00000000, 0x000001ff },
9267 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9268 0x00000000, 0x000007ff },
9269 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9270 0x00000000, 0x000001ff },
9271
9272 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9273 };
9274
9275 is_5705 = is_5750 = 0;
9276 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9277 is_5705 = 1;
9278 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9279 is_5750 = 1;
9280 }
9281
9282 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9283 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9284 continue;
9285
9286 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9287 continue;
9288
9289 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9290 (reg_tbl[i].flags & TG3_FL_NOT_5788))
9291 continue;
9292
9293 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9294 continue;
9295
9296 offset = (u32) reg_tbl[i].offset;
9297 read_mask = reg_tbl[i].read_mask;
9298 write_mask = reg_tbl[i].write_mask;
9299
9300 /* Save the original register content */
9301 save_val = tr32(offset);
9302
9303 /* Determine the read-only value. */
9304 read_val = save_val & read_mask;
9305
9306 /* Write zero to the register, then make sure the read-only bits
9307 * are not changed and the read/write bits are all zeros.
9308 */
9309 tw32(offset, 0);
9310
9311 val = tr32(offset);
9312
9313 /* Test the read-only and read/write bits. */
9314 if (((val & read_mask) != read_val) || (val & write_mask))
9315 goto out;
9316
9317 /* Write ones to all the bits defined by RdMask and WrMask, then
9318 * make sure the read-only bits are not changed and the
9319 * read/write bits are all ones.
9320 */
9321 tw32(offset, read_mask | write_mask);
9322
9323 val = tr32(offset);
9324
9325 /* Test the read-only bits. */
9326 if ((val & read_mask) != read_val)
9327 goto out;
9328
9329 /* Test the read/write bits. */
9330 if ((val & write_mask) != write_mask)
9331 goto out;
9332
9333 tw32(offset, save_val);
9334 }
9335
9336 return 0;
9337
9338 out:
9339 if (netif_msg_hw(tp))
9340 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9341 offset);
9342 tw32(offset, save_val);
9343 return -EIO;
9344 }
9345
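/* Write each test pattern across [offset, offset + len) in 4-byte
 * steps and read it back; fail on the first mismatch.
 */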
9346 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9347 {
9348 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9349 int i;
9350 u32 j;
9351
9352 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9353 for (j = 0; j < len; j += 4) {
9354 u32 val;
9355
9356 tg3_write_mem(tp, offset + j, test_pattern[i]);
9357 tg3_read_mem(tp, offset + j, &val);
9358 if (val != test_pattern[i])
9359 return -EIO;
9360 }
9361 }
9362 return 0;
9363 }
9364
9365 static int tg3_test_memory(struct tg3 *tp)
9366 {
9367 static struct mem_entry {
9368 u32 offset;
9369 u32 len;
9370 } mem_tbl_570x[] = {
9371 { 0x00000000, 0x00b50},
9372 { 0x00002000, 0x1c000},
9373 { 0xffffffff, 0x00000}
9374 }, mem_tbl_5705[] = {
9375 { 0x00000100, 0x0000c},
9376 { 0x00000200, 0x00008},
9377 { 0x00004000, 0x00800},
9378 { 0x00006000, 0x01000},
9379 { 0x00008000, 0x02000},
9380 { 0x00010000, 0x0e000},
9381 { 0xffffffff, 0x00000}
9382 }, mem_tbl_5755[] = {
9383 { 0x00000200, 0x00008},
9384 { 0x00004000, 0x00800},
9385 { 0x00006000, 0x00800},
9386 { 0x00008000, 0x02000},
9387 { 0x00010000, 0x0c000},
9388 { 0xffffffff, 0x00000}
9389 }, mem_tbl_5906[] = {
9390 { 0x00000200, 0x00008},
9391 { 0x00004000, 0x00400},
9392 { 0x00006000, 0x00400},
9393 { 0x00008000, 0x01000},
9394 { 0x00010000, 0x01000},
9395 { 0xffffffff, 0x00000}
9396 };
9397 struct mem_entry *mem_tbl;
9398 int err = 0;
9399 int i;
9400
9401 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9402 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9403 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9404 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9405 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9406 mem_tbl = mem_tbl_5755;
9407 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9408 mem_tbl = mem_tbl_5906;
9409 else
9410 mem_tbl = mem_tbl_5705;
9411 } else
9412 mem_tbl = mem_tbl_570x;
9413
9414 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9415 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9416 mem_tbl[i].len)) != 0)
9417 break;
9418 }
9419
9420 return err;
9421 }
9422
9423 #define TG3_MAC_LOOPBACK 0
9424 #define TG3_PHY_LOOPBACK 1
9425
9426 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
9427 {
9428 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
9429 u32 desc_idx;
9430 struct sk_buff *skb, *rx_skb;
9431 u8 *tx_data;
9432 dma_addr_t map;
9433 int num_pkts, tx_len, rx_len, i, err;
9434 struct tg3_rx_buffer_desc *desc;
9435
9436 if (loopback_mode == TG3_MAC_LOOPBACK) {
9437 /* HW errata - mac loopback fails in some cases on 5780.
9438 * Normal traffic and PHY loopback are not affected by
9439 * errata.
9440 */
9441 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9442 return 0;
9443
9444 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
9445 MAC_MODE_PORT_INT_LPBACK;
9446 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9447 mac_mode |= MAC_MODE_LINK_POLARITY;
9448 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9449 mac_mode |= MAC_MODE_PORT_MODE_MII;
9450 else
9451 mac_mode |= MAC_MODE_PORT_MODE_GMII;
9452 tw32(MAC_MODE, mac_mode);
9453 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
9454 u32 val;
9455
9456 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9457 u32 phytest;
9458
9459 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
9460 u32 phy;
9461
9462 tg3_writephy(tp, MII_TG3_EPHY_TEST,
9463 phytest | MII_TG3_EPHY_SHADOW_EN);
9464 if (!tg3_readphy(tp, 0x1b, &phy))
9465 tg3_writephy(tp, 0x1b, phy & ~0x20);
9466 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
9467 }
9468 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
9469 } else
9470 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
9471
9472 tg3_phy_toggle_automdix(tp, 0);
9473
9474 tg3_writephy(tp, MII_BMCR, val);
9475 udelay(40);
9476
9477 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
9478 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9479 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
9480 mac_mode |= MAC_MODE_PORT_MODE_MII;
9481 } else
9482 mac_mode |= MAC_MODE_PORT_MODE_GMII;
9483
9484 /* reset to prevent losing 1st rx packet intermittently */
9485 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
9486 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9487 udelay(10);
9488 tw32_f(MAC_RX_MODE, tp->rx_mode);
9489 }
9490 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
9491 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
9492 mac_mode &= ~MAC_MODE_LINK_POLARITY;
9493 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
9494 mac_mode |= MAC_MODE_LINK_POLARITY;
9495 tg3_writephy(tp, MII_TG3_EXT_CTRL,
9496 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
9497 }
9498 tw32(MAC_MODE, mac_mode);
9499 } else
9501 return -EINVAL;
9502
9503 err = -EIO;
9504
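/* Build a frame addressed to ourselves with an incrementing byte
 * pattern, transmit it, then verify the payload on the standard
 * rx return ring.
 */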
9505 tx_len = 1514;
9506 skb = netdev_alloc_skb(tp->dev, tx_len);
9507 if (!skb)
9508 return -ENOMEM;
9509
9510 tx_data = skb_put(skb, tx_len);
9511 memcpy(tx_data, tp->dev->dev_addr, 6);
9512 memset(tx_data + 6, 0x0, 8);
9513
9514 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
9515
9516 for (i = 14; i < tx_len; i++)
9517 tx_data[i] = (u8) (i & 0xff);
9518
9519 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
9520
9521 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9522 HOSTCC_MODE_NOW);
9523
9524 udelay(10);
9525
9526 rx_start_idx = tp->hw_status->idx[0].rx_producer;
9527
9528 num_pkts = 0;
9529
9530 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
9531
9532 tp->tx_prod++;
9533 num_pkts++;
9534
9535 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
9536 tp->tx_prod);
9537 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
9538
9539 udelay(10);
9540
9541 /* 250 usec to allow enough time on some 10/100 Mbps devices. */
9542 for (i = 0; i < 25; i++) {
9543 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9544 HOSTCC_MODE_NOW);
9545
9546 udelay(10);
9547
9548 tx_idx = tp->hw_status->idx[0].tx_consumer;
9549 rx_idx = tp->hw_status->idx[0].rx_producer;
9550 if ((tx_idx == tp->tx_prod) &&
9551 (rx_idx == (rx_start_idx + num_pkts)))
9552 break;
9553 }
9554
9555 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
9556 dev_kfree_skb(skb);
9557
9558 if (tx_idx != tp->tx_prod)
9559 goto out;
9560
9561 if (rx_idx != rx_start_idx + num_pkts)
9562 goto out;
9563
9564 desc = &tp->rx_rcb[rx_start_idx];
9565 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
9566 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
9567 if (opaque_key != RXD_OPAQUE_RING_STD)
9568 goto out;
9569
9570 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
9571 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
9572 goto out;
9573
9574 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
9575 if (rx_len != tx_len)
9576 goto out;
9577
9578 rx_skb = tp->rx_std_buffers[desc_idx].skb;
9579
9580 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
9581 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
9582
9583 for (i = 14; i < tx_len; i++) {
9584 if (*(rx_skb->data + i) != (u8) (i & 0xff))
9585 goto out;
9586 }
9587 err = 0;
9588
9589 /* tg3_free_rings will unmap and free the rx_skb */
9590 out:
9591 return err;
9592 }
9593
9594 #define TG3_MAC_LOOPBACK_FAILED 1
9595 #define TG3_PHY_LOOPBACK_FAILED 2
9596 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
9597 TG3_PHY_LOOPBACK_FAILED)
9598
9599 static int tg3_test_loopback(struct tg3 *tp)
9600 {
9601 int err = 0;
9602 u32 cpmuctrl = 0;
9603
9604 if (!netif_running(tp->dev))
9605 return TG3_LOOPBACK_FAILED;
9606
9607 err = tg3_reset_hw(tp, 1);
9608 if (err)
9609 return TG3_LOOPBACK_FAILED;
9610
9611 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9612 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
9613 int i;
9614 u32 status;
9615
9616 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
9617
9618 /* Wait for up to 40 microseconds to acquire lock. */
9619 for (i = 0; i < 4; i++) {
9620 status = tr32(TG3_CPMU_MUTEX_GNT);
9621 if (status == CPMU_MUTEX_GNT_DRIVER)
9622 break;
9623 udelay(10);
9624 }
9625
9626 if (status != CPMU_MUTEX_GNT_DRIVER)
9627 return TG3_LOOPBACK_FAILED;
9628
9629 /* Turn off link-based power management. */
9630 cpmuctrl = tr32(TG3_CPMU_CTRL);
9631 tw32(TG3_CPMU_CTRL,
9632 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
9633 CPMU_CTRL_LINK_AWARE_MODE));
9634 }
9635
9636 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
9637 err |= TG3_MAC_LOOPBACK_FAILED;
9638
9639 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9640 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
9641 tw32(TG3_CPMU_CTRL, cpmuctrl);
9642
9643 /* Release the mutex */
9644 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
9645 }
9646
9647 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
9648 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
9649 err |= TG3_PHY_LOOPBACK_FAILED;
9650 }
9651
9652 return err;
9653 }
9654
9655 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
9656 u64 *data)
9657 {
9658 struct tg3 *tp = netdev_priv(dev);
9659
9660 if (tp->link_config.phy_is_low_power)
9661 tg3_set_power_state(tp, PCI_D0);
9662
9663 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
9664
9665 if (tg3_test_nvram(tp) != 0) {
9666 etest->flags |= ETH_TEST_FL_FAILED;
9667 data[0] = 1;
9668 }
9669 if (tg3_test_link(tp) != 0) {
9670 etest->flags |= ETH_TEST_FL_FAILED;
9671 data[1] = 1;
9672 }
9673 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9674 int err, irq_sync = 0;
9675
9676 if (netif_running(dev)) {
9677 tg3_netif_stop(tp);
9678 irq_sync = 1;
9679 }
9680
9681 tg3_full_lock(tp, irq_sync);
9682
9683 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
9684 err = tg3_nvram_lock(tp);
9685 tg3_halt_cpu(tp, RX_CPU_BASE);
9686 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9687 tg3_halt_cpu(tp, TX_CPU_BASE);
9688 if (!err)
9689 tg3_nvram_unlock(tp);
9690
9691 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
9692 tg3_phy_reset(tp);
9693
9694 if (tg3_test_registers(tp) != 0) {
9695 etest->flags |= ETH_TEST_FL_FAILED;
9696 data[2] = 1;
9697 }
9698 if (tg3_test_memory(tp) != 0) {
9699 etest->flags |= ETH_TEST_FL_FAILED;
9700 data[3] = 1;
9701 }
9702 if ((data[4] = tg3_test_loopback(tp)) != 0)
9703 etest->flags |= ETH_TEST_FL_FAILED;
9704
9705 tg3_full_unlock(tp);
9706
9707 if (tg3_test_interrupt(tp) != 0) {
9708 etest->flags |= ETH_TEST_FL_FAILED;
9709 data[5] = 1;
9710 }
9711
9712 tg3_full_lock(tp, 0);
9713
9714 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9715 if (netif_running(dev)) {
9716 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
9717 if (!tg3_restart_hw(tp, 1))
9718 tg3_netif_start(tp);
9719 }
9720
9721 tg3_full_unlock(tp);
9722 }
9723 if (tp->link_config.phy_is_low_power)
9724 tg3_set_power_state(tp, PCI_D3hot);
9726 }
9727
9728 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9729 {
9730 struct mii_ioctl_data *data = if_mii(ifr);
9731 struct tg3 *tp = netdev_priv(dev);
9732 int err;
9733
9734 switch (cmd) {
9735 case SIOCGMIIPHY:
9736 data->phy_id = PHY_ADDR;
9737
9738 /* fallthru */
9739 case SIOCGMIIREG: {
9740 u32 mii_regval;
9741
9742 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9743 break; /* We have no PHY */
9744
9745 if (tp->link_config.phy_is_low_power)
9746 return -EAGAIN;
9747
9748 spin_lock_bh(&tp->lock);
9749 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
9750 spin_unlock_bh(&tp->lock);
9751
9752 data->val_out = mii_regval;
9753
9754 return err;
9755 }
9756
9757 case SIOCSMIIREG:
9758 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9759 break; /* We have no PHY */
9760
9761 if (!capable(CAP_NET_ADMIN))
9762 return -EPERM;
9763
9764 if (tp->link_config.phy_is_low_power)
9765 return -EAGAIN;
9766
9767 spin_lock_bh(&tp->lock);
9768 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
9769 spin_unlock_bh(&tp->lock);
9770
9771 return err;
9772
9773 default:
9774 /* do nothing */
9775 break;
9776 }
9777 return -EOPNOTSUPP;
9778 }
9779
9780 #if TG3_VLAN_TAG_USED
9781 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
9782 {
9783 struct tg3 *tp = netdev_priv(dev);
9784
9785 if (netif_running(dev))
9786 tg3_netif_stop(tp);
9787
9788 tg3_full_lock(tp, 0);
9789
9790 tp->vlgrp = grp;
9791
9792 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
9793 __tg3_set_rx_mode(dev);
9794
9795 if (netif_running(dev))
9796 tg3_netif_start(tp);
9797
9798 tg3_full_unlock(tp);
9799 }
9800 #endif
9801
9802 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9803 {
9804 struct tg3 *tp = netdev_priv(dev);
9805
9806 memcpy(ec, &tp->coal, sizeof(*ec));
9807 return 0;
9808 }
9809
9810 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9811 {
9812 struct tg3 *tp = netdev_priv(dev);
9813 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9814 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9815
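/* 5705 and newer chips lack the irq and stats-block coalescing knobs;
 * their limits stay zero so any nonzero request is rejected below.
 */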
9816 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9817 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9818 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9819 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9820 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9821 }
9822
9823 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9824 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9825 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9826 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9827 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9828 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9829 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9830 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9831 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9832 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9833 return -EINVAL;
9834
9835 /* No rx interrupts will be generated if both are zero */
9836 if ((ec->rx_coalesce_usecs == 0) &&
9837 (ec->rx_max_coalesced_frames == 0))
9838 return -EINVAL;
9839
9840 /* No tx interrupts will be generated if both are zero */
9841 if ((ec->tx_coalesce_usecs == 0) &&
9842 (ec->tx_max_coalesced_frames == 0))
9843 return -EINVAL;
9844
9845 /* Only copy relevant parameters, ignore all others. */
9846 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9847 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9848 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9849 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9850 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9851 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9852 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9853 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9854 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9855
9856 if (netif_running(dev)) {
9857 tg3_full_lock(tp, 0);
9858 __tg3_set_coalesce(tp, &tp->coal);
9859 tg3_full_unlock(tp);
9860 }
9861 return 0;
9862 }
9863
9864 static const struct ethtool_ops tg3_ethtool_ops = {
9865 .get_settings = tg3_get_settings,
9866 .set_settings = tg3_set_settings,
9867 .get_drvinfo = tg3_get_drvinfo,
9868 .get_regs_len = tg3_get_regs_len,
9869 .get_regs = tg3_get_regs,
9870 .get_wol = tg3_get_wol,
9871 .set_wol = tg3_set_wol,
9872 .get_msglevel = tg3_get_msglevel,
9873 .set_msglevel = tg3_set_msglevel,
9874 .nway_reset = tg3_nway_reset,
9875 .get_link = ethtool_op_get_link,
9876 .get_eeprom_len = tg3_get_eeprom_len,
9877 .get_eeprom = tg3_get_eeprom,
9878 .set_eeprom = tg3_set_eeprom,
9879 .get_ringparam = tg3_get_ringparam,
9880 .set_ringparam = tg3_set_ringparam,
9881 .get_pauseparam = tg3_get_pauseparam,
9882 .set_pauseparam = tg3_set_pauseparam,
9883 .get_rx_csum = tg3_get_rx_csum,
9884 .set_rx_csum = tg3_set_rx_csum,
9885 .set_tx_csum = tg3_set_tx_csum,
9886 .set_sg = ethtool_op_set_sg,
9887 .set_tso = tg3_set_tso,
9888 .self_test = tg3_self_test,
9889 .get_strings = tg3_get_strings,
9890 .phys_id = tg3_phys_id,
9891 .get_ethtool_stats = tg3_get_ethtool_stats,
9892 .get_coalesce = tg3_get_coalesce,
9893 .set_coalesce = tg3_set_coalesce,
9894 .get_sset_count = tg3_get_sset_count,
9895 };
9896
9897 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9898 {
9899 u32 cursize, val, magic;
9900
9901 tp->nvram_size = EEPROM_CHIP_SIZE;
9902
9903 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9904 return;
9905
9906 if ((magic != TG3_EEPROM_MAGIC) &&
9907 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9908 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
9909 return;
9910
9911 /*
9912 * Size the chip by reading offsets at increasing powers of two.
9913 * When we encounter our validation signature, we know the addressing
9914 * has wrapped around, and thus have our chip size.
9915 */
9916 cursize = 0x10;
9917
9918 while (cursize < tp->nvram_size) {
9919 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9920 return;
9921
9922 if (val == magic)
9923 break;
9924
9925 cursize <<= 1;
9926 }
9927
9928 tp->nvram_size = cursize;
9929 }
9930
9931 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9932 {
9933 u32 val;
9934
9935 if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9936 return;
9937
9938 /* Selfboot format */
9939 if (val != TG3_EEPROM_MAGIC) {
9940 tg3_get_eeprom_size(tp);
9941 return;
9942 }
9943
9944 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9945 if (val != 0) {
9946 tp->nvram_size = (val >> 16) * 1024;
9947 return;
9948 }
9949 }
9950 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
9951 }
9952
9953 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
9954 {
9955 u32 nvcfg1;
9956
9957 nvcfg1 = tr32(NVRAM_CFG1);
9958 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
9959 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9960 } else {
9962 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9963 tw32(NVRAM_CFG1, nvcfg1);
9964 }
9965
9966 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
9967 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9968 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
9969 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
9970 tp->nvram_jedecnum = JEDEC_ATMEL;
9971 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9972 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9973 break;
9974 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
9975 tp->nvram_jedecnum = JEDEC_ATMEL;
9976 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
9977 break;
9978 case FLASH_VENDOR_ATMEL_EEPROM:
9979 tp->nvram_jedecnum = JEDEC_ATMEL;
9980 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9981 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9982 break;
9983 case FLASH_VENDOR_ST:
9984 tp->nvram_jedecnum = JEDEC_ST;
9985 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
9986 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9987 break;
9988 case FLASH_VENDOR_SAIFUN:
9989 tp->nvram_jedecnum = JEDEC_SAIFUN;
9990 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
9991 break;
9992 case FLASH_VENDOR_SST_SMALL:
9993 case FLASH_VENDOR_SST_LARGE:
9994 tp->nvram_jedecnum = JEDEC_SST;
9995 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
9996 break;
9997 }
9998 } else {
10000 tp->nvram_jedecnum = JEDEC_ATMEL;
10001 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10002 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10003 }
10004 }
10005
10006 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10007 {
10008 u32 nvcfg1;
10009
10010 nvcfg1 = tr32(NVRAM_CFG1);
10011
10012 /* NVRAM protection for TPM */
10013 if (nvcfg1 & (1 << 27))
10014 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10015
10016 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10017 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10018 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10019 tp->nvram_jedecnum = JEDEC_ATMEL;
10020 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10021 break;
10022 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10023 tp->nvram_jedecnum = JEDEC_ATMEL;
10024 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10025 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10026 break;
10027 case FLASH_5752VENDOR_ST_M45PE10:
10028 case FLASH_5752VENDOR_ST_M45PE20:
10029 case FLASH_5752VENDOR_ST_M45PE40:
10030 tp->nvram_jedecnum = JEDEC_ST;
10031 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10032 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10033 break;
10034 }
10035
10036 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10037 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10038 case FLASH_5752PAGE_SIZE_256:
10039 tp->nvram_pagesize = 256;
10040 break;
10041 case FLASH_5752PAGE_SIZE_512:
10042 tp->nvram_pagesize = 512;
10043 break;
10044 case FLASH_5752PAGE_SIZE_1K:
10045 tp->nvram_pagesize = 1024;
10046 break;
10047 case FLASH_5752PAGE_SIZE_2K:
10048 tp->nvram_pagesize = 2048;
10049 break;
10050 case FLASH_5752PAGE_SIZE_4K:
10051 tp->nvram_pagesize = 4096;
10052 break;
10053 case FLASH_5752PAGE_SIZE_264:
10054 tp->nvram_pagesize = 264;
10055 break;
10056 }
10057 } else {
10059 /* For eeprom, set pagesize to maximum eeprom size */
10060 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10061
10062 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10063 tw32(NVRAM_CFG1, nvcfg1);
10064 }
10065 }
10066
10067 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10068 {
10069 u32 nvcfg1, protect = 0;
10070
10071 nvcfg1 = tr32(NVRAM_CFG1);
10072
10073 /* NVRAM protection for TPM */
10074 if (nvcfg1 & (1 << 27)) {
10075 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10076 protect = 1;
10077 }
10078
10079 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10080 switch (nvcfg1) {
10081 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10082 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10083 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10084 case FLASH_5755VENDOR_ATMEL_FLASH_5:
10085 tp->nvram_jedecnum = JEDEC_ATMEL;
10086 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10087 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10088 tp->nvram_pagesize = 264;
10089 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10090 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
10091 tp->nvram_size = (protect ? 0x3e200 :
10092 TG3_NVRAM_SIZE_512KB);
10093 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
10094 tp->nvram_size = (protect ? 0x1f200 :
10095 TG3_NVRAM_SIZE_256KB);
10096 else
10097 tp->nvram_size = (protect ? 0x1f200 :
10098 TG3_NVRAM_SIZE_128KB);
10099 break;
10100 case FLASH_5752VENDOR_ST_M45PE10:
10101 case FLASH_5752VENDOR_ST_M45PE20:
10102 case FLASH_5752VENDOR_ST_M45PE40:
10103 tp->nvram_jedecnum = JEDEC_ST;
10104 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10105 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10106 tp->nvram_pagesize = 256;
10107 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
10108 tp->nvram_size = (protect ?
10109 TG3_NVRAM_SIZE_64KB :
10110 TG3_NVRAM_SIZE_128KB);
10111 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
10112 tp->nvram_size = (protect ?
10113 TG3_NVRAM_SIZE_64KB :
10114 TG3_NVRAM_SIZE_256KB);
10115 else
10116 tp->nvram_size = (protect ?
10117 TG3_NVRAM_SIZE_128KB :
10118 TG3_NVRAM_SIZE_512KB);
10119 break;
10120 }
10121 }
10122
10123 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10124 {
10125 u32 nvcfg1;
10126
10127 nvcfg1 = tr32(NVRAM_CFG1);
10128
10129 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10130 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10131 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10132 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10133 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10134 tp->nvram_jedecnum = JEDEC_ATMEL;
10135 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10136 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10137
10138 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10139 tw32(NVRAM_CFG1, nvcfg1);
10140 break;
10141 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10142 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10143 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10144 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10145 tp->nvram_jedecnum = JEDEC_ATMEL;
10146 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10147 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10148 tp->nvram_pagesize = 264;
10149 break;
10150 case FLASH_5752VENDOR_ST_M45PE10:
10151 case FLASH_5752VENDOR_ST_M45PE20:
10152 case FLASH_5752VENDOR_ST_M45PE40:
10153 tp->nvram_jedecnum = JEDEC_ST;
10154 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10155 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10156 tp->nvram_pagesize = 256;
10157 break;
10158 }
10159 }
10160
10161 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10162 {
10163 u32 nvcfg1, protect = 0;
10164
10165 nvcfg1 = tr32(NVRAM_CFG1);
10166
10167 /* NVRAM protection for TPM */
10168 if (nvcfg1 & (1 << 27)) {
10169 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10170 protect = 1;
10171 }
10172
10173 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10174 switch (nvcfg1) {
10175 case FLASH_5761VENDOR_ATMEL_ADB021D:
10176 case FLASH_5761VENDOR_ATMEL_ADB041D:
10177 case FLASH_5761VENDOR_ATMEL_ADB081D:
10178 case FLASH_5761VENDOR_ATMEL_ADB161D:
10179 case FLASH_5761VENDOR_ATMEL_MDB021D:
10180 case FLASH_5761VENDOR_ATMEL_MDB041D:
10181 case FLASH_5761VENDOR_ATMEL_MDB081D:
10182 case FLASH_5761VENDOR_ATMEL_MDB161D:
10183 tp->nvram_jedecnum = JEDEC_ATMEL;
10184 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10185 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10186 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10187 tp->nvram_pagesize = 256;
10188 break;
10189 case FLASH_5761VENDOR_ST_A_M45PE20:
10190 case FLASH_5761VENDOR_ST_A_M45PE40:
10191 case FLASH_5761VENDOR_ST_A_M45PE80:
10192 case FLASH_5761VENDOR_ST_A_M45PE16:
10193 case FLASH_5761VENDOR_ST_M_M45PE20:
10194 case FLASH_5761VENDOR_ST_M_M45PE40:
10195 case FLASH_5761VENDOR_ST_M_M45PE80:
10196 case FLASH_5761VENDOR_ST_M_M45PE16:
10197 tp->nvram_jedecnum = JEDEC_ST;
10198 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10199 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10200 tp->nvram_pagesize = 256;
10201 break;
10202 }
10203
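/* With TPM protection on, the usable size is whatever the lockout
 * address register reports; otherwise derive it from the part number.
 */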
10204 if (protect) {
10205 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10206 } else {
10207 switch (nvcfg1) {
10208 case FLASH_5761VENDOR_ATMEL_ADB161D:
10209 case FLASH_5761VENDOR_ATMEL_MDB161D:
10210 case FLASH_5761VENDOR_ST_A_M45PE16:
10211 case FLASH_5761VENDOR_ST_M_M45PE16:
10212 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
10213 break;
10214 case FLASH_5761VENDOR_ATMEL_ADB081D:
10215 case FLASH_5761VENDOR_ATMEL_MDB081D:
10216 case FLASH_5761VENDOR_ST_A_M45PE80:
10217 case FLASH_5761VENDOR_ST_M_M45PE80:
10218 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
10219 break;
10220 case FLASH_5761VENDOR_ATMEL_ADB041D:
10221 case FLASH_5761VENDOR_ATMEL_MDB041D:
10222 case FLASH_5761VENDOR_ST_A_M45PE40:
10223 case FLASH_5761VENDOR_ST_M_M45PE40:
10224 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10225 break;
10226 case FLASH_5761VENDOR_ATMEL_ADB021D:
10227 case FLASH_5761VENDOR_ATMEL_MDB021D:
10228 case FLASH_5761VENDOR_ST_A_M45PE20:
10229 case FLASH_5761VENDOR_ST_M_M45PE20:
10230 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10231 break;
10232 }
10233 }
10234 }
10235
10236 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10237 {
10238 tp->nvram_jedecnum = JEDEC_ATMEL;
10239 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10240 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10241 }
10242
10243 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10244 static void __devinit tg3_nvram_init(struct tg3 *tp)
10245 {
10246 tw32_f(GRC_EEPROM_ADDR,
10247 (EEPROM_ADDR_FSM_RESET |
10248 (EEPROM_DEFAULT_CLOCK_PERIOD <<
10249 EEPROM_ADDR_CLKPERD_SHIFT)));
10250
10251 msleep(1);
10252
10253 /* Enable seeprom accesses. */
10254 tw32_f(GRC_LOCAL_CTRL,
10255 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10256 udelay(100);
10257
10258 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10259 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10260 tp->tg3_flags |= TG3_FLAG_NVRAM;
10261
10262 if (tg3_nvram_lock(tp)) {
10263 printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
10264 "tg3_nvram_init failed.\n", tp->dev->name);
10265 return;
10266 }
10267 tg3_enable_nvram_access(tp);
10268
10269 tp->nvram_size = 0;
10270
10271 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10272 tg3_get_5752_nvram_info(tp);
10273 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10274 tg3_get_5755_nvram_info(tp);
10275 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10276 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
10277 tg3_get_5787_nvram_info(tp);
10278 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10279 tg3_get_5761_nvram_info(tp);
10280 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10281 tg3_get_5906_nvram_info(tp);
10282 else
10283 tg3_get_nvram_info(tp);
10284
10285 if (tp->nvram_size == 0)
10286 tg3_get_nvram_size(tp);
10287
10288 tg3_disable_nvram_access(tp);
10289 tg3_nvram_unlock(tp);
10290
10291 } else {
10292 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10293
10294 tg3_get_eeprom_size(tp);
10295 }
10296 }
10297
10298 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
10299 u32 offset, u32 *val)
10300 {
10301 u32 tmp;
10302 int i;
10303
10304 if (offset > EEPROM_ADDR_ADDR_MASK ||
10305 (offset % 4) != 0)
10306 return -EINVAL;
10307
10308 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
10309 EEPROM_ADDR_DEVID_MASK |
10310 EEPROM_ADDR_READ);
10311 tw32(GRC_EEPROM_ADDR,
10312 tmp |
10313 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10314 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
10315 EEPROM_ADDR_ADDR_MASK) |
10316 EEPROM_ADDR_READ | EEPROM_ADDR_START);
10317
10318 for (i = 0; i < 1000; i++) {
10319 tmp = tr32(GRC_EEPROM_ADDR);
10320
10321 if (tmp & EEPROM_ADDR_COMPLETE)
10322 break;
10323 msleep(1);
10324 }
10325 if (!(tmp & EEPROM_ADDR_COMPLETE))
10326 return -EBUSY;
10327
10328 *val = tr32(GRC_EEPROM_DATA);
10329 return 0;
10330 }
10331
10332 #define NVRAM_CMD_TIMEOUT 10000
10333
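/* Issue an NVRAM command and poll for completion, for up to
 * NVRAM_CMD_TIMEOUT * 10us (about 100ms).
 */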
10334 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10335 {
10336 int i;
10337
10338 tw32(NVRAM_CMD, nvram_cmd);
10339 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10340 udelay(10);
10341 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10342 udelay(10);
10343 break;
10344 }
10345 }
10346 if (i == NVRAM_CMD_TIMEOUT) {
10347 return -EBUSY;
10348 }
10349 return 0;
10350 }
10351
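/* Convert a linear NVRAM byte offset into the page-number/byte-offset
 * form used by unprotected Atmel AT45DB flashes, where the page index
 * sits above ATMEL_AT45DB0X1B_PAGE_POS.  For example, with 264-byte
 * pages, offset 530 (page 2, byte 2) becomes
 * (2 << ATMEL_AT45DB0X1B_PAGE_POS) + 2.
 */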
10352 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10353 {
10354 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10355 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10356 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10357 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10358 (tp->nvram_jedecnum == JEDEC_ATMEL))
10360 addr = ((addr / tp->nvram_pagesize) <<
10361 ATMEL_AT45DB0X1B_PAGE_POS) +
10362 (addr % tp->nvram_pagesize);
10363
10364 return addr;
10365 }
10366
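/* Inverse of tg3_nvram_phys_addr(): map a page/byte-offset address
 * back to a linear byte offset.
 */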
10367 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10368 {
10369 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10370 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10371 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10372 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10373 (tp->nvram_jedecnum == JEDEC_ATMEL))
10375 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10376 tp->nvram_pagesize) +
10377 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10378
10379 return addr;
10380 }
10381
10382 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
10383 {
10384 int ret;
10385
10386 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
10387 return tg3_nvram_read_using_eeprom(tp, offset, val);
10388
10389 offset = tg3_nvram_phys_addr(tp, offset);
10390
10391 if (offset > NVRAM_ADDR_MSK)
10392 return -EINVAL;
10393
10394 ret = tg3_nvram_lock(tp);
10395 if (ret)
10396 return ret;
10397
10398 tg3_enable_nvram_access(tp);
10399
10400 tw32(NVRAM_ADDR, offset);
10401 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
10402 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
10403
10404 if (ret == 0)
10405 *val = swab32(tr32(NVRAM_RDDATA));
10406
10407 tg3_disable_nvram_access(tp);
10408
10409 tg3_nvram_unlock(tp);
10410
10411 return ret;
10412 }
10413
10414 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10415 {
10416 u32 v;
10417 int res = tg3_nvram_read(tp, offset, &v);
10418 if (!res)
10419 *val = cpu_to_le32(v);
10420 return res;
10421 }
10422
10423 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10424 {
10425 int err;
10426 u32 tmp;
10427
10428 err = tg3_nvram_read(tp, offset, &tmp);
10429 if (!err)
*val = swab32(tmp);
10430 return err;
10431 }
10432
10433 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
10434 u32 offset, u32 len, u8 *buf)
10435 {
10436 int i, j, rc = 0;
10437 u32 val;
10438
10439 for (i = 0; i < len; i += 4) {
10440 u32 addr;
10441 __le32 data;
10442
10443 addr = offset + i;
10444
10445 memcpy(&data, buf + i, 4);
10446
10447 tw32(GRC_EEPROM_DATA, le32_to_cpu(data));
10448
10449 val = tr32(GRC_EEPROM_ADDR);
10450 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
10451
10452 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
10453 EEPROM_ADDR_READ);
10454 tw32(GRC_EEPROM_ADDR, val |
10455 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10456 (addr & EEPROM_ADDR_ADDR_MASK) |
10457 EEPROM_ADDR_START |
10458 EEPROM_ADDR_WRITE);
10459
10460 for (j = 0; j < 1000; j++) {
10461 val = tr32(GRC_EEPROM_ADDR);
10462
10463 if (val & EEPROM_ADDR_COMPLETE)
10464 break;
10465 msleep(1);
10466 }
10467 if (!(val & EEPROM_ADDR_COMPLETE)) {
10468 rc = -EBUSY;
10469 break;
10470 }
10471 }
10472
10473 return rc;
10474 }
10475
10476 /* offset and length are dword aligned */
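/* Unbuffered flash parts can only be written a full page at a time:
 * read the page into a scratch buffer, merge the caller's bytes, erase
 * the page, then rewrite it word by word.
 */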
10477 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
10478 u8 *buf)
10479 {
10480 int ret = 0;
10481 u32 pagesize = tp->nvram_pagesize;
10482 u32 pagemask = pagesize - 1;
10483 u32 nvram_cmd;
10484 u8 *tmp;
10485
10486 tmp = kmalloc(pagesize, GFP_KERNEL);
10487 if (tmp == NULL)
10488 return -ENOMEM;
10489
10490 while (len) {
10491 int j;
10492 u32 phy_addr, page_off, size;
10493
10494 phy_addr = offset & ~pagemask;
10495
10496 for (j = 0; j < pagesize; j += 4) {
10497 if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
10498 (__le32 *) (tmp + j))))
10499 break;
10500 }
10501 if (ret)
10502 break;
10503
10504 page_off = offset & pagemask;
10505 size = pagesize;
10506 if (len < size)
10507 size = len;
10508
10509 len -= size;
10510
10511 memcpy(tmp + page_off, buf, size);
10512
10513 offset = offset + (pagesize - page_off);
10514
10515 tg3_enable_nvram_access(tp);
10516
10517 /*
10518 * Before we can erase the flash page, we need
10519 * to issue a special "write enable" command.
10520 */
10521 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10522
10523 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10524 break;
10525
10526 /* Erase the target page */
10527 tw32(NVRAM_ADDR, phy_addr);
10528
10529 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
10530 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
10531
10532 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10533 break;
10534
10535 /* Issue another write enable to start the write. */
10536 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10537
10538 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10539 break;
10540
10541 for (j = 0; j < pagesize; j += 4) {
10542 __be32 data;
10543
10544 data = *((__be32 *) (tmp + j));
10545 /* swab32(le32_to_cpu(data)), actually */
10546 tw32(NVRAM_WRDATA, be32_to_cpu(data));
10547
10548 tw32(NVRAM_ADDR, phy_addr + j);
10549
10550 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
10551 NVRAM_CMD_WR;
10552
10553 if (j == 0)
10554 nvram_cmd |= NVRAM_CMD_FIRST;
10555 else if (j == (pagesize - 4))
10556 nvram_cmd |= NVRAM_CMD_LAST;
10557
10558 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10559 break;
10560 }
10561 if (ret)
10562 break;
10563 }
10564
10565 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10566 tg3_nvram_exec_cmd(tp, nvram_cmd);
10567
10568 kfree(tmp);
10569
10570 return ret;
10571 }
10572
10573 /* offset and length are dword aligned */
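/* Buffered parts and EEPROMs accept word writes directly; just tag the
 * first and last word of each page with NVRAM_CMD_FIRST/LAST.  Some ST
 * parts also need a write-enable command up front.
 */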
10574 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
10575 u8 *buf)
10576 {
10577 int i, ret = 0;
10578
10579 for (i = 0; i < len; i += 4, offset += 4) {
10580 u32 page_off, phy_addr, nvram_cmd;
10581 __be32 data;
10582
10583 memcpy(&data, buf + i, 4);
10584 tw32(NVRAM_WRDATA, be32_to_cpu(data));
10585
10586 page_off = offset % tp->nvram_pagesize;
10587
10588 phy_addr = tg3_nvram_phys_addr(tp, offset);
10589
10590 tw32(NVRAM_ADDR, phy_addr);
10591
10592 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
10593
10594 if ((page_off == 0) || (i == 0))
10595 nvram_cmd |= NVRAM_CMD_FIRST;
10596 if (page_off == (tp->nvram_pagesize - 4))
10597 nvram_cmd |= NVRAM_CMD_LAST;
10598
10599 if (i == (len - 4))
10600 nvram_cmd |= NVRAM_CMD_LAST;
10601
10602 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
10603 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
10604 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
10605 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
10606 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
10607 (tp->nvram_jedecnum == JEDEC_ST) &&
10608 (nvram_cmd & NVRAM_CMD_FIRST)) {
10609
10610 if ((ret = tg3_nvram_exec_cmd(tp,
10611 NVRAM_CMD_WREN | NVRAM_CMD_GO |
10612 NVRAM_CMD_DONE)))
10613 break;
10615 }
10616 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10617 /* We always do complete word writes to eeprom. */
10618 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
10619 }
10620
10621 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10622 break;
10623 }
10624 return ret;
10625 }
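
/* Editor's note: unlike the unbuffered path, the buffered path above
 * computes page_off with a true modulo ('%') rather than a mask.  Some
 * of the buffered Atmel parts report a 264-byte page, which is not a
 * power of two, so the mask shortcut would be wrong there.
 */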
10626
10627 /* offset and length are dword aligned */
10628 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
10629 {
10630 int ret;
10631
10632 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10633 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
10634 ~GRC_LCLCTRL_GPIO_OUTPUT1);
10635 udelay(40);
10636 }
10637
10638 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
10639 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
10640 } else {
10642 u32 grc_mode;
10643
10644 ret = tg3_nvram_lock(tp);
10645 if (ret)
10646 return ret;
10647
10648 tg3_enable_nvram_access(tp);
10649 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
10650 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
10651 tw32(NVRAM_WRITE1, 0x406);
10652
10653 grc_mode = tr32(GRC_MODE);
10654 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
10655
10656 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
10657 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10658
10659 ret = tg3_nvram_write_block_buffered(tp, offset, len,
10660 buf);
10661 } else {
10663 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
10664 buf);
10665 }
10666
10667 grc_mode = tr32(GRC_MODE);
10668 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
10669
10670 tg3_disable_nvram_access(tp);
10671 tg3_nvram_unlock(tp);
10672 }
10673
10674 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10675 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10676 udelay(40);
10677 }
10678
10679 return ret;
10680 }
10681
10682 struct subsys_tbl_ent {
10683 u16 subsys_vendor, subsys_devid;
10684 u32 phy_id;
10685 };
10686
10687 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
10688 /* Broadcom boards. */
10689 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
10690 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
10691 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
10692 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
10693 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
10694 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
10695 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
10696 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
10697 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
10698 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
10699 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
10700
10701 /* 3com boards. */
10702 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
10703 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
10704 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
10705 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
10706 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
10707
10708 /* DELL boards. */
10709 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
10710 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
10711 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
10712 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
10713
10714 /* Compaq boards. */
10715 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
10716 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
10717 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
10718 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
10719 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
10720
10721 /* IBM boards. */
10722 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
10723 };
10724
10725 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10726 {
10727 int i;
10728
10729 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10730 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10731 tp->pdev->subsystem_vendor) &&
10732 (subsys_id_to_phy_id[i].subsys_devid ==
10733 tp->pdev->subsystem_device))
10734 return &subsys_id_to_phy_id[i];
10735 }
10736 return NULL;
10737 }
10738
10739 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10740 {
10741 u32 val;
10742 u16 pmcsr;
10743
10744 /* On some early chips the SRAM cannot be accessed in D3hot state,
10745 * so we need to make sure we're in D0.
10746 */
10747 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
10748 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10749 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
10750 msleep(1);
10751
10752 /* Make sure register accesses (indirect or otherwise)
10753 * will function correctly.
10754 */
10755 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10756 tp->misc_host_ctrl);
10757
10758 /* The memory arbiter has to be enabled in order for SRAM accesses
10759 * to succeed. Normally on powerup the tg3 chip firmware will make
10760 * sure it is enabled, but other entities such as system netboot
10761 * code might disable it.
10762 */
10763 val = tr32(MEMARB_MODE);
10764 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
10765
10766 tp->phy_id = PHY_ID_INVALID;
10767 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10768
10769 /* Assume an onboard device and WOL capable by default. */
10770 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
10771
10772 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10773 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
10774 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10775 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10776 }
10777 val = tr32(VCPU_CFGSHDW);
10778 if (val & VCPU_CFGSHDW_ASPM_DBNC)
10779 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10780 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
10781 (val & VCPU_CFGSHDW_WOL_MAGPKT))
10782 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10783 return;
10784 }
10785
10786 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
10787 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
10788 u32 nic_cfg, led_cfg;
10789 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
10790 int eeprom_phy_serdes = 0;
10791
10792 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
10793 tp->nic_sram_data_cfg = nic_cfg;
10794
10795 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
10796 ver >>= NIC_SRAM_DATA_VER_SHIFT;
10797 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
10798 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
10799 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
10800 (ver > 0) && (ver < 0x100))
10801 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10802
10803 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10804 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10805 eeprom_phy_serdes = 1;
10806
10807 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
10808 if (nic_phy_id != 0) {
10809 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
10810 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
10811
10812 eeprom_phy_id = (id1 >> 16) << 10;
10813 eeprom_phy_id |= (id2 & 0xfc00) << 16;
10814 eeprom_phy_id |= (id2 & 0x03ff) << 0;
10815 } else
10816 eeprom_phy_id = 0;
10817
10818 tp->phy_id = eeprom_phy_id;
10819 if (eeprom_phy_serdes) {
10820 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
10821 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
10822 else
10823 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10824 }
10825
10826 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10827 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
10828 SHASTA_EXT_LED_MODE_MASK);
10829 else
10830 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
10831
10832 switch (led_cfg) {
10833 default:
10834 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
10835 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10836 break;
10837
10838 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
10839 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10840 break;
10841
10842 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
10843 tp->led_ctrl = LED_CTRL_MODE_MAC;
10844
10845 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
10846 * read, as happens with some older 5700/5701 bootcode.
10847 */
10848 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10849 ASIC_REV_5700 ||
10850 GET_ASIC_REV(tp->pci_chip_rev_id) ==
10851 ASIC_REV_5701)
10852 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10853
10854 break;
10855
10856 case SHASTA_EXT_LED_SHARED:
10857 tp->led_ctrl = LED_CTRL_MODE_SHARED;
10858 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
10859 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
10860 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10861 LED_CTRL_MODE_PHY_2);
10862 break;
10863
10864 case SHASTA_EXT_LED_MAC:
10865 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
10866 break;
10867
10868 case SHASTA_EXT_LED_COMBO:
10869 tp->led_ctrl = LED_CTRL_MODE_COMBO;
10870 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
10871 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10872 LED_CTRL_MODE_PHY_2);
10873 break;
10874
10875 }
10876
10877 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10878 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
10879 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
10880 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10881
10882 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
10883 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10884
10885 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
10886 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
10887 if ((tp->pdev->subsystem_vendor ==
10888 PCI_VENDOR_ID_ARIMA) &&
10889 (tp->pdev->subsystem_device == 0x205a ||
10890 tp->pdev->subsystem_device == 0x2063))
10891 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10892 } else {
10893 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10894 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10895 }
10896
10897 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
10898 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
10899 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10900 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
10901 }
10902 if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
10903 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
10904 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
10905 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
10906 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
10907
10908 if (tp->tg3_flags & TG3_FLAG_WOL_CAP &&
10909 nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)
10910 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10911
10912 if (cfg2 & (1 << 17))
10913 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
10914
10915 /* SerDes signal pre-emphasis in register 0x590 is set by
10916 * the bootcode if bit 18 is set. */
10917 if (cfg2 & (1 << 18))
10918 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
10919
10920 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10921 u32 cfg3;
10922
10923 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
10924 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
10925 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10926 }
10927 }
10928 }
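
/* Editor's note -- an illustrative helper, not called anywhere: the
 * SRAM path above and tg3_phy_probe() below fold the two 16-bit MII
 * ID words into the driver's internal PHY_ID layout the same way.
 */
static inline u32 tg3_example_pack_phy_id(u32 id1, u32 id2)
{
	u32 phy_id;

	phy_id  = (id1 & 0xffff) << 10;	/* OUI bits from PHYSID1 */
	phy_id |= (id2 & 0xfc00) << 16;	/* remaining OUI bits */
	phy_id |= (id2 & 0x03ff) << 0;	/* model and revision */
	return phy_id;
}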
10929
10930 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
10931 {
10932 int i;
10933 u32 val;
10934
10935 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
10936 tw32(OTP_CTRL, cmd);
10937
10938 /* Wait up to 1 ms (100 polls at 10 usec) for the command to complete. */
10939 for (i = 0; i < 100; i++) {
10940 val = tr32(OTP_STATUS);
10941 if (val & OTP_STATUS_CMD_DONE)
10942 break;
10943 udelay(10);
10944 }
10945
10946 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
10947 }
10948
10949 /* Read the gphy configuration from the OTP region of the chip. The gphy
10950 * configuration is a 32-bit value that straddles the alignment boundary.
10951 * We do two 32-bit reads and then shift and merge the results.
10952 */
10953 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
10954 {
10955 u32 bhalf_otp, thalf_otp;
10956
10957 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
10958
10959 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
10960 return 0;
10961
10962 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
10963
10964 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
10965 return 0;
10966
10967 thalf_otp = tr32(OTP_READ_DATA);
10968
10969 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
10970
10971 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
10972 return 0;
10973
10974 bhalf_otp = tr32(OTP_READ_DATA);
10975
10976 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
10977 }
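
/* Editor's worked example for the merge above (values hypothetical):
 * with thalf_otp = 0xAAAA1234 and bhalf_otp = 0x5678BBBB,
 * ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16) = 0x12345678.
 */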
10978
10979 static int __devinit tg3_phy_probe(struct tg3 *tp)
10980 {
10981 u32 hw_phy_id_1, hw_phy_id_2;
10982 u32 hw_phy_id, hw_phy_id_masked;
10983 int err;
10984
10985 /* Reading the PHY ID register can conflict with ASF
10986 * firmware access to the PHY hardware.
10987 */
10988 err = 0;
10989 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
10990 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
10991 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
10992 } else {
10993 /* Now read the physical PHY_ID from the chip and verify
10994 * that it is sane. If it doesn't look good, we fall back
10995 * to the PHY_ID found in the eeprom area, and failing
10996 * that, the hard-coded subsystem ID table.
10997 */
10998 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
10999 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
11000
11001 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
11002 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
11003 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
11004
11005 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
11006 }
11007
11008 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
11009 tp->phy_id = hw_phy_id;
11010 if (hw_phy_id_masked == PHY_ID_BCM8002)
11011 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11012 else
11013 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
11014 } else {
11015 if (tp->phy_id != PHY_ID_INVALID) {
11016 /* Do nothing, phy ID already set up in
11017 * tg3_get_eeprom_hw_cfg().
11018 */
11019 } else {
11020 struct subsys_tbl_ent *p;
11021
11022 /* No eeprom signature? Try the hardcoded
11023 * subsys device table.
11024 */
11025 p = lookup_by_subsys(tp);
11026 if (!p)
11027 return -ENODEV;
11028
11029 tp->phy_id = p->phy_id;
11030 if (!tp->phy_id ||
11031 tp->phy_id == PHY_ID_BCM8002)
11032 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11033 }
11034 }
11035
11036 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
11037 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
11038 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
11039 u32 bmsr, adv_reg, tg3_ctrl, mask;
11040
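	/* Editor's note: BMSR latches link-down events, so it is read
	 * twice; the second read reflects the current link state.
	 */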
11041 tg3_readphy(tp, MII_BMSR, &bmsr);
11042 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
11043 (bmsr & BMSR_LSTATUS))
11044 goto skip_phy_reset;
11045
11046 err = tg3_phy_reset(tp);
11047 if (err)
11048 return err;
11049
11050 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
11051 ADVERTISE_100HALF | ADVERTISE_100FULL |
11052 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
11053 tg3_ctrl = 0;
11054 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
11055 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
11056 MII_TG3_CTRL_ADV_1000_FULL);
11057 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11058 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
11059 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
11060 MII_TG3_CTRL_ENABLE_AS_MASTER);
11061 }
11062
11063 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11064 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11065 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
11066 if (!tg3_copper_is_advertising_all(tp, mask)) {
11067 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11068
11069 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11070 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11071
11072 tg3_writephy(tp, MII_BMCR,
11073 BMCR_ANENABLE | BMCR_ANRESTART);
11074 }
11075 tg3_phy_set_wirespeed(tp);
11076
11077 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11078 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11079 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11080 }
11081
11082 skip_phy_reset:
11083 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
11084 err = tg3_init_5401phy_dsp(tp);
11085 if (err)
11086 return err;
11087 }
11088
11089 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
11090 err = tg3_init_5401phy_dsp(tp);
11091 }
11092
11093 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
11094 tp->link_config.advertising =
11095 (ADVERTISED_1000baseT_Half |
11096 ADVERTISED_1000baseT_Full |
11097 ADVERTISED_Autoneg |
11098 ADVERTISED_FIBRE);
11099 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
11100 tp->link_config.advertising &=
11101 ~(ADVERTISED_1000baseT_Half |
11102 ADVERTISED_1000baseT_Full);
11103
11104 return err;
11105 }
11106
11107 static void __devinit tg3_read_partno(struct tg3 *tp)
11108 {
11109 unsigned char vpd_data[256];
11110 unsigned int i;
11111 u32 magic;
11112
11113 if (tg3_nvram_read_swab(tp, 0x0, &magic))
11114 goto out_not_found;
11115
11116 if (magic == TG3_EEPROM_MAGIC) {
11117 for (i = 0; i < 256; i += 4) {
11118 u32 tmp;
11119
11120 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
11121 goto out_not_found;
11122
11123 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
11124 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
11125 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
11126 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
11127 }
11128 } else {
11129 int vpd_cap;
11130
11131 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11132 for (i = 0; i < 256; i += 4) {
11133 u32 tmp, j = 0;
11134 __le32 v;
11135 u16 tmp16;
11136
11137 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11138 i);
11139 while (j++ < 100) {
11140 pci_read_config_word(tp->pdev, vpd_cap +
11141 PCI_VPD_ADDR, &tmp16);
11142 if (tmp16 & 0x8000)
11143 break;
11144 msleep(1);
11145 }
11146 if (!(tmp16 & 0x8000))
11147 goto out_not_found;
11148
11149 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11150 &tmp);
11151 v = cpu_to_le32(tmp);
11152 memcpy(&vpd_data[i], &v, 4);
11153 }
11154 }
11155
11156 /* Now parse and find the part number. */
11157 for (i = 0; i < 254; ) {
11158 unsigned char val = vpd_data[i];
11159 unsigned int block_end;
11160
11161 if (val == 0x82 || val == 0x91) {
11162 i = (i + 3 +
11163 (vpd_data[i + 1] +
11164 (vpd_data[i + 2] << 8)));
11165 continue;
11166 }
11167
11168 if (val != 0x90)
11169 goto out_not_found;
11170
11171 block_end = (i + 3 +
11172 (vpd_data[i + 1] +
11173 (vpd_data[i + 2] << 8)));
11174 i += 3;
11175
11176 if (block_end > 256)
11177 goto out_not_found;
11178
11179 while (i < (block_end - 2)) {
11180 if (vpd_data[i + 0] == 'P' &&
11181 vpd_data[i + 1] == 'N') {
11182 int partno_len = vpd_data[i + 2];
11183
11184 i += 3;
11185 if (partno_len > 24 || (partno_len + i) > 256)
11186 goto out_not_found;
11187
11188 memcpy(tp->board_part_number,
11189 &vpd_data[i], partno_len);
11190
11191 /* Success. */
11192 return;
11193 }
11194 i += 3 + vpd_data[i + 2];
11195 }
11196
11197 /* Part number not found. */
11198 goto out_not_found;
11199 }
11200
11201 out_not_found:
11202 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11203 strcpy(tp->board_part_number, "BCM95906");
11204 else
11205 strcpy(tp->board_part_number, "none");
11206 }
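
/* Editor's note on the VPD layout parsed above (sketched from the
 * code; see the PCI VPD large-resource format for the full spec):
 *
 *	0x82 <len16le> ...	identifier string (skipped)
 *	0x90 <len16le> ...	read-only keywords, e.g.
 *				'P' 'N' <len8> <part number bytes>
 *	0x91 <len16le> ...	read-write keywords (skipped)
 *
 * The loop skips 0x82/0x91 blocks and scans the 0x90 block for the
 * two-byte keyword "PN", whose payload is the board part number.
 */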
11207
11208 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11209 {
11210 u32 val;
11211
11212 if (tg3_nvram_read_swab(tp, offset, &val) ||
11213 (val & 0xfc000000) != 0x0c000000 ||
11214 tg3_nvram_read_swab(tp, offset + 4, &val) ||
11215 val != 0)
11216 return 0;
11217
11218 return 1;
11219 }
11220
11221 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
11222 {
11223 u32 val, offset, start;
11224 u32 ver_offset;
11225 int i, bcnt;
11226
11227 if (tg3_nvram_read_swab(tp, 0, &val))
11228 return;
11229
11230 if (val != TG3_EEPROM_MAGIC)
11231 return;
11232
11233 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
11234 tg3_nvram_read_swab(tp, 0x4, &start))
11235 return;
11236
11237 offset = tg3_nvram_logical_addr(tp, offset);
11238
11239 if (!tg3_fw_img_is_valid(tp, offset) ||
11240 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
11241 return;
11242
11243 offset = offset + ver_offset - start;
11244 for (i = 0; i < 16; i += 4) {
11245 __le32 v;
11246 if (tg3_nvram_read_le(tp, offset + i, &v))
11247 return;
11248
11249 memcpy(tp->fw_ver + i, &v, 4);
11250 }
11251
11252 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11253 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
11254 return;
11255
11256 for (offset = TG3_NVM_DIR_START;
11257 offset < TG3_NVM_DIR_END;
11258 offset += TG3_NVM_DIRENT_SIZE) {
11259 if (tg3_nvram_read_swab(tp, offset, &val))
11260 return;
11261
11262 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
11263 break;
11264 }
11265
11266 if (offset == TG3_NVM_DIR_END)
11267 return;
11268
11269 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11270 start = 0x08000000;
11271 else if (tg3_nvram_read_swab(tp, offset - 4, &start))
11272 return;
11273
11274 if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
11275 !tg3_fw_img_is_valid(tp, offset) ||
11276 tg3_nvram_read_swab(tp, offset + 8, &val))
11277 return;
11278
11279 offset += val - start;
11280
11281 bcnt = strlen(tp->fw_ver);
11282
11283 tp->fw_ver[bcnt++] = ',';
11284 tp->fw_ver[bcnt++] = ' ';
11285
11286 for (i = 0; i < 4; i++) {
11287 __le32 v;
11288 if (tg3_nvram_read_le(tp, offset, &v))
11289 return;
11290
11291 offset += sizeof(v);
11292
11293 if (bcnt > TG3_VER_SIZE - sizeof(v)) {
11294 memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
11295 break;
11296 }
11297
11298 memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
11299 bcnt += sizeof(v);
11300 }
11301
11302 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
11303 }
11304
11305 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11306
11307 static int __devinit tg3_get_invariants(struct tg3 *tp)
11308 {
11309 static struct pci_device_id write_reorder_chipsets[] = {
11310 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11311 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
11312 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11313 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
11314 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11315 PCI_DEVICE_ID_VIA_8385_0) },
11316 { },
11317 };
11318 u32 misc_ctrl_reg;
11319 u32 cacheline_sz_reg;
11320 u32 pci_state_reg, grc_misc_cfg;
11321 u32 val;
11322 u16 pci_cmd;
11323 int err, pcie_cap;
11324
11325 /* Force memory write invalidate off. If we leave it on,
11326 * then on 5700_BX chips we have to enable a workaround.
11327 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11328 * to match the cacheline size. The Broadcom driver has this
11329 * workaround but turns MWI off all the time, so it never actually
11330 * uses it. This seems to suggest that the workaround is insufficient.
11331 */
11332 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11333 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11334 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11335
11336 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11337 * has the register indirect write enable bit set before
11338 * we try to access any of the MMIO registers. It is also
11339 * critical that the PCI-X hw workaround situation is decided
11340 * before that as well.
11341 */
11342 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11343 &misc_ctrl_reg);
11344
11345 tp->pci_chip_rev_id = (misc_ctrl_reg >>
11346 MISC_HOST_CTRL_CHIPREV_SHIFT);
11347 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11348 u32 prod_id_asic_rev;
11349
11350 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11351 &prod_id_asic_rev);
11352 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11353 }
11354
11355 /* Wrong chip ID in 5752 A0. This code can be removed later
11356 * as A0 is not in production.
11357 */
11358 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11359 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11360
11361 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11362 * we need to disable memory and use config. cycles
11363 * only to access all registers. The 5702/03 chips
11364 * can mistakenly decode the special cycles from the
11365 * ICH chipsets as memory write cycles, causing corruption
11366 * of register and memory space. Only certain ICH bridges
11367 * will drive special cycles with non-zero data during the
11368 * address phase which can fall within the 5703's address
11369 * range. This is not an ICH bug as the PCI spec allows
11370 * non-zero address during special cycles. However, only
11371 * these ICH bridges are known to drive non-zero addresses
11372 * during special cycles.
11373 *
11374 * Since special cycles do not cross PCI bridges, we only
11375 * enable this workaround if the 5703 is on the secondary
11376 * bus of these ICH bridges.
11377 */
11378 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11379 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11380 static struct tg3_dev_id {
11381 u32 vendor;
11382 u32 device;
11383 u32 rev;
11384 } ich_chipsets[] = {
11385 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11386 PCI_ANY_ID },
11387 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11388 PCI_ANY_ID },
11389 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11390 0xa },
11391 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11392 PCI_ANY_ID },
11393 { },
11394 };
11395 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11396 struct pci_dev *bridge = NULL;
11397
11398 while (pci_id->vendor != 0) {
11399 bridge = pci_get_device(pci_id->vendor, pci_id->device,
11400 bridge);
11401 if (!bridge) {
11402 pci_id++;
11403 continue;
11404 }
11405 if (pci_id->rev != PCI_ANY_ID) {
11406 if (bridge->revision > pci_id->rev)
11407 continue;
11408 }
11409 if (bridge->subordinate &&
11410 (bridge->subordinate->number ==
11411 tp->pdev->bus->number)) {
11412
11413 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11414 pci_dev_put(bridge);
11415 break;
11416 }
11417 }
11418 }
11419
11420 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
11421 static struct tg3_dev_id {
11422 u32 vendor;
11423 u32 device;
11424 } bridge_chipsets[] = {
11425 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
11426 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
11427 { },
11428 };
11429 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
11430 struct pci_dev *bridge = NULL;
11431
11432 while (pci_id->vendor != 0) {
11433 bridge = pci_get_device(pci_id->vendor,
11434 pci_id->device,
11435 bridge);
11436 if (!bridge) {
11437 pci_id++;
11438 continue;
11439 }
11440 if (bridge->subordinate &&
11441 (bridge->subordinate->number <=
11442 tp->pdev->bus->number) &&
11443 (bridge->subordinate->subordinate >=
11444 tp->pdev->bus->number)) {
11445 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
11446 pci_dev_put(bridge);
11447 break;
11448 }
11449 }
11450 }
11451
11452 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
11453 * DMA addresses > 40-bit. This bridge may have other additional
11454 * 57xx devices behind it in some 4-port NIC designs for example.
11455 * Any tg3 device found behind the bridge will also need the 40-bit
11456 * DMA workaround.
11457 */
11458 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11459 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11460 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
11461 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11462 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
11463 } else {
11465 struct pci_dev *bridge = NULL;
11466
11467 do {
11468 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
11469 PCI_DEVICE_ID_SERVERWORKS_EPB,
11470 bridge);
11471 if (bridge && bridge->subordinate &&
11472 (bridge->subordinate->number <=
11473 tp->pdev->bus->number) &&
11474 (bridge->subordinate->subordinate >=
11475 tp->pdev->bus->number)) {
11476 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11477 pci_dev_put(bridge);
11478 break;
11479 }
11480 } while (bridge);
11481 }
11482
11483 /* Initialize misc host control in PCI block. */
11484 tp->misc_host_ctrl |= (misc_ctrl_reg &
11485 MISC_HOST_CTRL_CHIPREV);
11486 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11487 tp->misc_host_ctrl);
11488
11489 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11490 &cacheline_sz_reg);
11491
11492 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
11493 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
11494 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
11495 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
11496
11497 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11498 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11499 tp->pdev_peer = tg3_find_peer(tp);
11500
11501 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11502 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11503 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11504 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11505 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11506 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11507 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
11508 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11509 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
11510
11511 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
11512 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11513 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
11514
11515 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
11516 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
11517 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
11518 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
11519 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
11520 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
11521 tp->pdev_peer == tp->pdev))
11522 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
11523
11524 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11525 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11526 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11527 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11528 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11529 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
11530 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
11531 } else {
11532 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
11533 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11534 ASIC_REV_5750 &&
11535 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
11536 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
11537 }
11538 }
11539
11540 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11541 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11542 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
11543
11544 pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
11545 if (pcie_cap != 0) {
11546 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
11547
11548 pcie_set_readrq(tp->pdev, 4096);
11549
11550 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11551 u16 lnkctl;
11552
11553 pci_read_config_word(tp->pdev,
11554 pcie_cap + PCI_EXP_LNKCTL,
11555 &lnkctl);
11556 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
11557 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
11558 }
11559 }
11560
11561 /* If we have an AMD 762 or VIA K8T800 chipset, write
11562 * reordering to the mailbox registers done by the host
11563 * controller can cause major troubles. We read back from
11564 * every mailbox register write to force the writes to be
11565 * posted to the chip in order.
11566 */
11567 if (pci_dev_present(write_reorder_chipsets) &&
11568 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11569 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
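
	/* Editor's sketch of the flush-on-write idea behind
	 * TG3_FLAG_MBOX_WRITE_REORDER (see tg3_write_flush_reg32()
	 * elsewhere in this file):
	 *
	 *	writel(val, mbox);
	 *	readl(mbox);	<- the read back forces the posted
	 *			   write out to the chip in order
	 */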
11570
11571 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11572 tp->pci_lat_timer < 64) {
11573 tp->pci_lat_timer = 64;
11574
11575 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
11576 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
11577 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
11578 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
11579
11580 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11581 cacheline_sz_reg);
11582 }
11583
11584 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11585 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11586 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
11587 if (!tp->pcix_cap) {
11588 printk(KERN_ERR PFX "Cannot find PCI-X "
11589 "capability, aborting.\n");
11590 return -EIO;
11591 }
11592 }
11593
11594 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11595 &pci_state_reg);
11596
11597 if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
11598 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
11599
11600 /* If this is a 5700 BX chipset, and we are in PCI-X
11601 * mode, enable register write workaround.
11602 *
11603 * The workaround is to use indirect register accesses
11604 * for all chip writes not to mailbox registers.
11605 */
11606 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
11607 u32 pm_reg;
11608
11609 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11610
11611 /* The chip can have its power management PCI config
11612 * space registers clobbered due to this bug.
11613 * So explicitly force the chip into D0 here.
11614 */
11615 pci_read_config_dword(tp->pdev,
11616 tp->pm_cap + PCI_PM_CTRL,
11617 &pm_reg);
11618 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
11619 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
11620 pci_write_config_dword(tp->pdev,
11621 tp->pm_cap + PCI_PM_CTRL,
11622 pm_reg);
11623
11624 /* Also, force SERR#/PERR# in PCI command. */
11625 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11626 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
11627 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11628 }
11629 }
11630
11631 /* 5700 BX chips need to have their TX producer index mailboxes
11632 * written twice to workaround a bug.
11633 */
11634 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
11635 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
11636
11637 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
11638 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
11639 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
11640 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
11641
11642 /* Chip-specific fixup from Broadcom driver */
11643 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
11644 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
11645 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
11646 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
11647 }
11648
11649 /* Default fast path register access methods */
11650 tp->read32 = tg3_read32;
11651 tp->write32 = tg3_write32;
11652 tp->read32_mbox = tg3_read32;
11653 tp->write32_mbox = tg3_write32;
11654 tp->write32_tx_mbox = tg3_write32;
11655 tp->write32_rx_mbox = tg3_write32;
11656
11657 /* Various workaround register access methods */
11658 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
11659 tp->write32 = tg3_write_indirect_reg32;
11660 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11661 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
11662 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
11663 /*
11664 * Back to back register writes can cause problems on these
11665 * chips, the workaround is to read back all reg writes
11666 * except those to mailbox regs.
11667 *
11668 * See tg3_write_indirect_reg32().
11669 */
11670 tp->write32 = tg3_write_flush_reg32;
11671 }
11672
11674 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
11675 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
11676 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11677 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
11678 tp->write32_rx_mbox = tg3_write_flush_reg32;
11679 }
11680
11681 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
11682 tp->read32 = tg3_read_indirect_reg32;
11683 tp->write32 = tg3_write_indirect_reg32;
11684 tp->read32_mbox = tg3_read_indirect_mbox;
11685 tp->write32_mbox = tg3_write_indirect_mbox;
11686 tp->write32_tx_mbox = tg3_write_indirect_mbox;
11687 tp->write32_rx_mbox = tg3_write_indirect_mbox;
11688
11689 iounmap(tp->regs);
11690 tp->regs = NULL;
11691
11692 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11693 pci_cmd &= ~PCI_COMMAND_MEMORY;
11694 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11695 }
11696 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11697 tp->read32_mbox = tg3_read32_mbox_5906;
11698 tp->write32_mbox = tg3_write32_mbox_5906;
11699 tp->write32_tx_mbox = tg3_write32_mbox_5906;
11700 tp->write32_rx_mbox = tg3_write32_mbox_5906;
11701 }
11702
11703 if (tp->write32 == tg3_write_indirect_reg32 ||
11704 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11705 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11706 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
11707 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
11708
11709 /* Get eeprom hw config before calling tg3_set_power_state().
11710 * In particular, the TG3_FLG2_IS_NIC flag must be
11711 * determined before calling tg3_set_power_state() so that
11712 * we know whether or not to switch out of Vaux power.
11713 * When the flag is set, it means that GPIO1 is used for eeprom
11714 * write protect and also implies that it is a LOM where GPIOs
11715 * are not used to switch power.
11716 */
11717 tg3_get_eeprom_hw_cfg(tp);
11718
11719 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
11720 /* Allow reads and writes to the
11721 * APE register and memory space.
11722 */
11723 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
11724 PCISTATE_ALLOW_APE_SHMEM_WR;
11725 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
11726 pci_state_reg);
11727 }
11728
11729 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11730 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11731 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
11732
11733 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
11734 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
11735 tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
11736 tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
11737 tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
11738 }
11739
11740 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
11741 * GPIO1 driven high will bring 5700's external PHY out of reset.
11742 * It is also used as eeprom write protect on LOMs.
11743 */
11744 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
11745 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11746 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
11747 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
11748 GRC_LCLCTRL_GPIO_OUTPUT1);
11749 /* Unused GPIO3 must be driven as output on 5752 because there
11750 * are no pull-up resistors on unused GPIO pins.
11751 */
11752 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11753 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
11754
11755 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11756 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
11757
11758 /* Force the chip into D0. */
11759 err = tg3_set_power_state(tp, PCI_D0);
11760 if (err) {
11761 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
11762 pci_name(tp->pdev));
11763 return err;
11764 }
11765
11766 /* 5700 B0 chips do not support checksumming correctly due
11767 * to hardware bugs.
11768 */
11769 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
11770 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
11771
11772 /* Derive initial jumbo mode from MTU assigned in
11773 * ether_setup() via the alloc_etherdev() call
11774 */
11775 if (tp->dev->mtu > ETH_DATA_LEN &&
11776 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11777 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
11778
11779 /* Determine WakeOnLan speed to use. */
11780 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11781 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11782 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
11783 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
11784 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
11785 } else {
11786 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
11787 }
11788
11789 /* A few boards don't want Ethernet@WireSpeed phy feature */
11790 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11791 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
11792 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
11793 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
11794 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
11795 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
11796 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
11797
11798 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
11799 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
11800 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
11801 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
11802 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
11803
11804 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11805 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11806 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11807 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11808 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11809 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
11810 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
11811 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
11812 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
11813 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
11814 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11815 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
11816 }
11817
11818 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
11819 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
11820 tp->phy_otp = tg3_read_otp_phycfg(tp);
11821 if (tp->phy_otp == 0)
11822 tp->phy_otp = TG3_OTP_DEFAULT;
11823 }
11824
11825 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
11826 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
11827 else
11828 tp->mi_mode = MAC_MI_MODE_BASE;
11829
11830 tp->coalesce_mode = 0;
11831 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
11832 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
11833 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
11834
11835 /* Initialize MAC MI mode, polling disabled. */
11836 tw32_f(MAC_MI_MODE, tp->mi_mode);
11837 udelay(80);
11838
11839 /* Initialize data/descriptor byte/word swapping. */
11840 val = tr32(GRC_MODE);
11841 val &= GRC_MODE_HOST_STACKUP;
11842 tw32(GRC_MODE, val | tp->grc_mode);
11843
11844 tg3_switch_clocks(tp);
11845
11846 /* Clear this out for sanity. */
11847 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
11848
11849 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11850 &pci_state_reg);
11851 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
11852 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
11853 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
11854
11855 if (chiprevid == CHIPREV_ID_5701_A0 ||
11856 chiprevid == CHIPREV_ID_5701_B0 ||
11857 chiprevid == CHIPREV_ID_5701_B2 ||
11858 chiprevid == CHIPREV_ID_5701_B5) {
11859 void __iomem *sram_base;
11860
11861 /* Write some dummy words into the SRAM status block
11862 * area, see if it reads back correctly. If the return
11863 * value is bad, force enable the PCIX workaround.
11864 */
11865 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
11866
11867 writel(0x00000000, sram_base);
11868 writel(0x00000000, sram_base + 4);
11869 writel(0xffffffff, sram_base + 4);
11870 if (readl(sram_base) != 0x00000000)
11871 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11872 }
11873 }
11874
11875 udelay(50);
11876 tg3_nvram_init(tp);
11877
11878 grc_misc_cfg = tr32(GRC_MISC_CFG);
11879 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
11880
11881 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11882 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
11883 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
11884 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
11885
11886 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
11887 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
11888 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
11889 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
11890 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
11891 HOSTCC_MODE_CLRTICK_TXBD);
11892
11893 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
11894 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11895 tp->misc_host_ctrl);
11896 }
11897
11898 /* these are limited to 10/100 only */
11899 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11900 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
11901 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11902 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11903 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
11904 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
11905 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
11906 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11907 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
11908 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
11909 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
11910 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11911 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
11912
11913 err = tg3_phy_probe(tp);
11914 if (err) {
11915 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
11916 pci_name(tp->pdev), err);
11917 /* ... but do not return immediately ... */
11918 }
11919
11920 tg3_read_partno(tp);
11921 tg3_read_fw_ver(tp);
11922
11923 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
11924 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11925 } else {
11926 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11927 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
11928 else
11929 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11930 }
11931
11932 /* 5700 {AX,BX} chips have a broken status block link
11933 * change bit implementation, so we must use the
11934 * status register in those cases.
11935 */
11936 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11937 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
11938 else
11939 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
11940
11941 /* The led_ctrl is set during tg3_phy_probe, here we might
11942 * have to force the link status polling mechanism based
11943 * upon subsystem IDs.
11944 */
11945 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
11946 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11947 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
11948 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
11949 TG3_FLAG_USE_LINKCHG_REG);
11950 }
11951
11952 /* For all SERDES we poll the MAC status register. */
11953 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11954 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
11955 else
11956 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
11957
11958 /* All chips before 5787 can get confused if TX buffers
11959 * straddle the 4GB address boundary in some cases.
11960 */
11961 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11962 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11963 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11964 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11965 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11966 tp->dev->hard_start_xmit = tg3_start_xmit;
11967 else
11968 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
11969
11970 tp->rx_offset = 2;
11971 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11972 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
11973 tp->rx_offset = 0;
11974
11975 tp->rx_std_max_post = TG3_RX_RING_SIZE;
11976
11977 /* Increment the rx prod index on the rx std ring by at most
11978 * 8 for these chips to workaround hw errata.
11979 */
11980 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11981 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11982 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11983 tp->rx_std_max_post = 8;
11984
11985 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
11986 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
11987 PCIE_PWR_MGMT_L1_THRESH_MSK;
11988
11989 return err;
11990 }
11991
11992 #ifdef CONFIG_SPARC
11993 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
11994 {
11995 struct net_device *dev = tp->dev;
11996 struct pci_dev *pdev = tp->pdev;
11997 struct device_node *dp = pci_device_to_OF_node(pdev);
11998 const unsigned char *addr;
11999 int len;
12000
12001 addr = of_get_property(dp, "local-mac-address", &len);
12002 if (addr && len == 6) {
12003 memcpy(dev->dev_addr, addr, 6);
12004 memcpy(dev->perm_addr, dev->dev_addr, 6);
12005 return 0;
12006 }
12007 return -ENODEV;
12008 }
12009
12010 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12011 {
12012 struct net_device *dev = tp->dev;
12013
12014 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
12015 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
12016 return 0;
12017 }
12018 #endif
12019
12020 static int __devinit tg3_get_device_address(struct tg3 *tp)
12021 {
12022 struct net_device *dev = tp->dev;
12023 u32 hi, lo, mac_offset;
12024 int addr_ok = 0;
12025
12026 #ifdef CONFIG_SPARC
12027 if (!tg3_get_macaddr_sparc(tp))
12028 return 0;
12029 #endif
12030
12031 mac_offset = 0x7c;
12032 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12033 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12034 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
12035 mac_offset = 0xcc;
12036 if (tg3_nvram_lock(tp))
12037 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
12038 else
12039 tg3_nvram_unlock(tp);
12040 }
12041 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12042 mac_offset = 0x10;
12043
12044 /* First try to get it from MAC address mailbox. */
12045 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
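	/* Editor's note: 0x484b is ASCII "HK", apparently the bootcode's
	 * marker for a valid address in the mailbox.
	 */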
12046 if ((hi >> 16) == 0x484b) {
12047 dev->dev_addr[0] = (hi >> 8) & 0xff;
12048 dev->dev_addr[1] = (hi >> 0) & 0xff;
12049
12050 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
12051 dev->dev_addr[2] = (lo >> 24) & 0xff;
12052 dev->dev_addr[3] = (lo >> 16) & 0xff;
12053 dev->dev_addr[4] = (lo >> 8) & 0xff;
12054 dev->dev_addr[5] = (lo >> 0) & 0xff;
12055
12056 /* Some old bootcode may report a 0 MAC address in SRAM */
12057 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
12058 }
12059 if (!addr_ok) {
12060 /* Next, try NVRAM. */
12061 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
12062 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
12063 dev->dev_addr[0] = ((hi >> 16) & 0xff);
12064 dev->dev_addr[1] = ((hi >> 24) & 0xff);
12065 dev->dev_addr[2] = ((lo >> 0) & 0xff);
12066 dev->dev_addr[3] = ((lo >> 8) & 0xff);
12067 dev->dev_addr[4] = ((lo >> 16) & 0xff);
12068 dev->dev_addr[5] = ((lo >> 24) & 0xff);
12069 }
12070 /* Finally just fetch it out of the MAC control regs. */
12071 else {
12072 hi = tr32(MAC_ADDR_0_HIGH);
12073 lo = tr32(MAC_ADDR_0_LOW);
12074
12075 dev->dev_addr[5] = lo & 0xff;
12076 dev->dev_addr[4] = (lo >> 8) & 0xff;
12077 dev->dev_addr[3] = (lo >> 16) & 0xff;
12078 dev->dev_addr[2] = (lo >> 24) & 0xff;
12079 dev->dev_addr[1] = hi & 0xff;
12080 dev->dev_addr[0] = (hi >> 8) & 0xff;
12081 }
12082 }
12083
12084 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
12085 #ifdef CONFIG_SPARC
12086 if (!tg3_get_default_macaddr_sparc(tp))
12087 return 0;
12088 #endif
12089 return -EINVAL;
12090 }
12091 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
12092 return 0;
12093 }
12094
12095 #define BOUNDARY_SINGLE_CACHELINE 1
12096 #define BOUNDARY_MULTI_CACHELINE 2
12097
12098 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12099 {
12100 int cacheline_size;
12101 u8 byte;
12102 int goal;
12103
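	/* PCI_CACHE_LINE_SIZE is in units of 32-bit words; a value of 0
	 * usually means firmware never programmed it, so fall back to a
	 * worst-case 1024 bytes (editor's note).
	 */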
12104 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
12105 if (byte == 0)
12106 cacheline_size = 1024;
12107 else
12108 cacheline_size = (int) byte * 4;
12109
12110 /* On 5703 and later chips, the boundary bits have no
12111 * effect.
12112 */
12113 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12114 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12115 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12116 goto out;
12117
12118 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12119 goal = BOUNDARY_MULTI_CACHELINE;
12120 #else
12121 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12122 goal = BOUNDARY_SINGLE_CACHELINE;
12123 #else
12124 goal = 0;
12125 #endif
12126 #endif
12127
12128 if (!goal)
12129 goto out;
12130
12131 /* PCI controllers on most RISC systems tend to disconnect
12132 * when a device tries to burst across a cache-line boundary.
12133 * Therefore, letting tg3 do so just wastes PCI bandwidth.
12134 *
12135 * Unfortunately, for PCI-E there are only limited
12136 * write-side controls for this, and thus for reads
12137 * we will still get the disconnects. We'll also waste
12138 * these PCI cycles for both read and write for chips
12139 * other than 5700 and 5701 which do not implement the
12140 * boundary bits.
12141 */
12142 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12143 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12144 switch (cacheline_size) {
12145 case 16:
12146 case 32:
12147 case 64:
12148 case 128:
12149 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12150 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12151 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12152 } else {
12153 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12154 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12155 }
12156 break;
12157
12158 case 256:
12159 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12160 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12161 break;
12162
12163 default:
12164 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12165 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12166 break;
12167 }
12168 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12169 switch (cacheline_size) {
12170 case 16:
12171 case 32:
12172 case 64:
12173 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12174 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12175 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12176 break;
12177 }
12178 /* fallthrough */
12179 case 128:
12180 default:
12181 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12182 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12183 break;
12184 }
12185 } else {
12186 switch (cacheline_size) {
12187 case 16:
12188 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12189 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12190 DMA_RWCTRL_WRITE_BNDRY_16);
12191 break;
12192 }
12193 /* fallthrough */
12194 case 32:
12195 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12196 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12197 DMA_RWCTRL_WRITE_BNDRY_32);
12198 break;
12199 }
12200 /* fallthrough */
12201 case 64:
12202 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12203 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12204 DMA_RWCTRL_WRITE_BNDRY_64);
12205 break;
12206 }
12207 /* fallthrough */
12208 case 128:
12209 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12210 val |= (DMA_RWCTRL_READ_BNDRY_128 |
12211 DMA_RWCTRL_WRITE_BNDRY_128);
12212 break;
12213 }
12214 /* fallthrough */
12215 case 256:
12216 val |= (DMA_RWCTRL_READ_BNDRY_256 |
12217 DMA_RWCTRL_WRITE_BNDRY_256);
12218 break;
12219 case 512:
12220 val |= (DMA_RWCTRL_READ_BNDRY_512 |
12221 DMA_RWCTRL_WRITE_BNDRY_512);
12222 break;
12223 case 1024:
12224 default:
12225 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12226 DMA_RWCTRL_WRITE_BNDRY_1024);
12227 break;
12228 }
12229 }
12230
12231 out:
12232 return val;
12233 }
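
/* Editor's worked instance of the table above (hypothetical inputs):
 * on conventional PCI with a 64-byte cache line and
 * goal == BOUNDARY_SINGLE_CACHELINE, the final switch ORs in
 * DMA_RWCTRL_READ_BNDRY_64 | DMA_RWCTRL_WRITE_BNDRY_64, i.e. DMA
 * bursts break at 64-byte boundaries to match the cache line.
 */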
12234
12235 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
12236 {
12237 struct tg3_internal_buffer_desc test_desc;
12238 u32 sram_dma_descs;
12239 int i, ret;
12240
12241 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
12242
12243 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
12244 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
12245 tw32(RDMAC_STATUS, 0);
12246 tw32(WDMAC_STATUS, 0);
12247
12248 tw32(BUFMGR_MODE, 0);
12249 tw32(FTQ_RESET, 0);
12250
12251 test_desc.addr_hi = ((u64) buf_dma) >> 32;
12252 test_desc.addr_lo = buf_dma & 0xffffffff;
12253 test_desc.nic_mbuf = 0x00002100;
12254 test_desc.len = size;
12255
12256 /*
12257 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
12258 * the *second* time the tg3 driver was getting loaded after an
12259 * initial scan.
12260 *
12261 * Broadcom tells me:
12262 * ...the DMA engine is connected to the GRC block and a DMA
12263 * reset may affect the GRC block in some unpredictable way...
12264 * The behavior of resets to individual blocks has not been tested.
12265 *
12266 * Broadcom noted the GRC reset will also reset all sub-components.
12267 */
12268 if (to_device) {
12269 test_desc.cqid_sqid = (13 << 8) | 2;
12270
12271 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
12272 udelay(40);
12273 } else {
12274 test_desc.cqid_sqid = (16 << 8) | 7;
12275
12276 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
12277 udelay(40);
12278 }
12279 test_desc.flags = 0x00000005;
12280
12281 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
12282 u32 val;
12283
12284 val = *(((u32 *)&test_desc) + i);
12285 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
12286 sram_dma_descs + (i * sizeof(u32)));
12287 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
12288 }
12289 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
12290
12291 if (to_device) {
12292 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
12293 } else {
12294 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
12295 }
12296
12297 ret = -ENODEV;
12298 for (i = 0; i < 40; i++) {
12299 u32 val;
12300
12301 if (to_device)
12302 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
12303 else
12304 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
12305 if ((val & 0xffff) == sram_dma_descs) {
12306 ret = 0;
12307 break;
12308 }
12309
12310 udelay(100);
12311 }
12312
12313 return ret;
12314 }
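
/* Editor's illustrative usage (hypothetical; mirrors what
 * tg3_test_dma() below does with TEST_BUFFER_SIZE): run one
 * host-to-device transfer through the FTQ machinery.
 *
 *	u32 *buf;
 *	dma_addr_t buf_dma;
 *
 *	buf = pci_alloc_consistent(tp->pdev, 0x2000, &buf_dma);
 *	if (buf && tg3_do_test_dma(tp, buf, buf_dma, 0x2000, 1) == 0)
 *		printk(KERN_DEBUG "DMA to device completed\n");
 */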
12315
12316 #define TEST_BUFFER_SIZE 0x2000
12317
12318 static int __devinit tg3_test_dma(struct tg3 *tp)
12319 {
12320 dma_addr_t buf_dma;
12321 u32 *buf, saved_dma_rwctrl;
12322 int ret;
12323
12324 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
12325 if (!buf) {
12326 ret = -ENOMEM;
12327 goto out_nofree;
12328 }
12329
12330 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12331 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12332
12333 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
12334
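	/* Select bus-specific DMA watermark bits below. The magic constants
	 * are chip-specific tuning values; their derivation is not
	 * documented here.
	 */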
12335 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12336 /* DMA read watermark not used on PCIE */
12337 tp->dma_rwctrl |= 0x00180000;
12338 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
12339 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
12340 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
12341 tp->dma_rwctrl |= 0x003f0000;
12342 else
12343 tp->dma_rwctrl |= 0x003f000f;
12344 } else {
12345 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12346 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
12347 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
12348 u32 read_water = 0x7;
12349
12350 /* If the 5704 is behind the EPB bridge, we can
12351 * do the less restrictive ONE_DMA workaround for
12352 * better performance.
12353 */
12354 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
12355 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12356 tp->dma_rwctrl |= 0x8000;
12357 else if (ccval == 0x6 || ccval == 0x7)
12358 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
12359
12360 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
12361 read_water = 4;
12362 /* Set bit 23 to enable PCIX hw bug fix */
12363 tp->dma_rwctrl |=
12364 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
12365 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
12366 (1 << 23);
12367 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
12368 /* 5780 always in PCIX mode */
12369 tp->dma_rwctrl |= 0x00144000;
12370 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12371 /* 5714 always in PCIX mode */
12372 tp->dma_rwctrl |= 0x00148000;
12373 } else {
12374 tp->dma_rwctrl |= 0x001b000f;
12375 }
12376 }
12377
12378 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12379 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12380 tp->dma_rwctrl &= 0xfffffff0;
12381
12382 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12383 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
12384 /* Remove this if it causes problems for some boards. */
12385 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
12386
12387 /* On 5700/5701 chips, we need to set this bit.
12388 * Otherwise the chip will issue cacheline transactions
12389 * to streamable DMA memory with not all the byte
12390 * enables turned on. This is an error on several
12391 * RISC PCI controllers, in particular sparc64.
12392 *
12393 * On 5703/5704 chips, this bit has been reassigned
12394 * a different meaning. In particular, it is used
12395 * on those chips to enable a PCI-X workaround.
12396 */
12397 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
12398 }
12399
12400 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12401
12402 #if 0
12403 /* Unneeded, already done by tg3_get_invariants. */
12404 tg3_switch_clocks(tp);
12405 #endif
12406
12407 ret = 0;
12408 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12409 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
12410 goto out;
12411
12412 	/* It is best to perform the DMA test with the maximum write burst size
12413 * to expose the 5700/5701 write DMA bug.
12414 */
12415 saved_dma_rwctrl = tp->dma_rwctrl;
12416 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12417 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12418
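	/* Write a known pattern to the chip and read it back; if corruption
	 * is seen, drop once to the conservative 16-byte write boundary and
	 * retry before declaring the DMA engine broken.
	 */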
12419 while (1) {
12420 u32 *p = buf, i;
12421
12422 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
12423 p[i] = i;
12424
12425 /* Send the buffer to the chip. */
12426 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
12427 if (ret) {
12428 			printk(KERN_ERR "tg3_test_dma() buffer write failed, err = %d\n", ret);
12429 break;
12430 }
12431
12432 #if 0
12433 /* validate data reached card RAM correctly. */
12434 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12435 u32 val;
12436 tg3_read_mem(tp, 0x2100 + (i*4), &val);
12437 if (le32_to_cpu(val) != p[i]) {
12438 				printk(KERN_ERR "tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", le32_to_cpu(val), i);
12439 /* ret = -ENODEV here? */
12440 }
12441 p[i] = 0;
12442 }
12443 #endif
12444 /* Now read it back. */
12445 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
12446 if (ret) {
12447 			printk(KERN_ERR "tg3_test_dma() buffer read failed, err = %d\n", ret);
12449 break;
12450 }
12451
12452 /* Verify it. */
12453 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12454 if (p[i] == i)
12455 continue;
12456
12457 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12458 DMA_RWCTRL_WRITE_BNDRY_16) {
12459 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12460 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12461 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12462 break;
12463 } else {
12464 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
12465 ret = -ENODEV;
12466 goto out;
12467 }
12468 }
12469
12470 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
12471 /* Success. */
12472 ret = 0;
12473 break;
12474 }
12475 }
12476 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12477 DMA_RWCTRL_WRITE_BNDRY_16) {
12478 static struct pci_device_id dma_wait_state_chipsets[] = {
12479 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
12480 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
12481 { },
12482 };
12483
12484 /* DMA test passed without adjusting DMA boundary,
12485 * now look for chipsets that are known to expose the
12486 * DMA bug without failing the test.
12487 */
12488 if (pci_dev_present(dma_wait_state_chipsets)) {
12489 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12490 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12491 		} else {
12492 			/* Safe to use the calculated DMA boundary. */
12493 			tp->dma_rwctrl = saved_dma_rwctrl;
12494 		}
12495
12496 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12497 }
12498
12499 out:
12500 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
12501 out_nofree:
12502 return ret;
12503 }
12504
12505 static void __devinit tg3_init_link_config(struct tg3 *tp)
12506 {
12507 tp->link_config.advertising =
12508 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12509 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12510 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
12511 ADVERTISED_Autoneg | ADVERTISED_MII);
12512 tp->link_config.speed = SPEED_INVALID;
12513 tp->link_config.duplex = DUPLEX_INVALID;
12514 tp->link_config.autoneg = AUTONEG_ENABLE;
12515 tp->link_config.active_speed = SPEED_INVALID;
12516 tp->link_config.active_duplex = DUPLEX_INVALID;
12517 tp->link_config.phy_is_low_power = 0;
12518 tp->link_config.orig_speed = SPEED_INVALID;
12519 tp->link_config.orig_duplex = DUPLEX_INVALID;
12520 tp->link_config.orig_autoneg = AUTONEG_INVALID;
12521 }
12522
12523 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
12524 {
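	/* 5705 and newer chips get their own, smaller watermark defaults;
	 * the 5906 additionally overrides the MAC RX watermarks.
	 */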
12525 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12526 tp->bufmgr_config.mbuf_read_dma_low_water =
12527 DEFAULT_MB_RDMA_LOW_WATER_5705;
12528 tp->bufmgr_config.mbuf_mac_rx_low_water =
12529 DEFAULT_MB_MACRX_LOW_WATER_5705;
12530 tp->bufmgr_config.mbuf_high_water =
12531 DEFAULT_MB_HIGH_WATER_5705;
12532 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12533 tp->bufmgr_config.mbuf_mac_rx_low_water =
12534 DEFAULT_MB_MACRX_LOW_WATER_5906;
12535 tp->bufmgr_config.mbuf_high_water =
12536 DEFAULT_MB_HIGH_WATER_5906;
12537 }
12538
12539 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12540 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
12541 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12542 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
12543 tp->bufmgr_config.mbuf_high_water_jumbo =
12544 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
12545 } else {
12546 tp->bufmgr_config.mbuf_read_dma_low_water =
12547 DEFAULT_MB_RDMA_LOW_WATER;
12548 tp->bufmgr_config.mbuf_mac_rx_low_water =
12549 DEFAULT_MB_MACRX_LOW_WATER;
12550 tp->bufmgr_config.mbuf_high_water =
12551 DEFAULT_MB_HIGH_WATER;
12552
12553 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12554 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
12555 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12556 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
12557 tp->bufmgr_config.mbuf_high_water_jumbo =
12558 DEFAULT_MB_HIGH_WATER_JUMBO;
12559 }
12560
12561 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
12562 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
12563 }
12564
12565 static char * __devinit tg3_phy_string(struct tg3 *tp)
12566 {
12567 switch (tp->phy_id & PHY_ID_MASK) {
12568 case PHY_ID_BCM5400: return "5400";
12569 case PHY_ID_BCM5401: return "5401";
12570 case PHY_ID_BCM5411: return "5411";
12571 case PHY_ID_BCM5701: return "5701";
12572 case PHY_ID_BCM5703: return "5703";
12573 case PHY_ID_BCM5704: return "5704";
12574 case PHY_ID_BCM5705: return "5705";
12575 case PHY_ID_BCM5750: return "5750";
12576 case PHY_ID_BCM5752: return "5752";
12577 case PHY_ID_BCM5714: return "5714";
12578 case PHY_ID_BCM5780: return "5780";
12579 case PHY_ID_BCM5755: return "5755";
12580 case PHY_ID_BCM5787: return "5787";
12581 case PHY_ID_BCM5784: return "5784";
12582 case PHY_ID_BCM5756: return "5722/5756";
12583 case PHY_ID_BCM5906: return "5906";
12584 case PHY_ID_BCM5761: return "5761";
12585 case PHY_ID_BCM8002: return "8002/serdes";
12586 case 0: return "serdes";
12587 default: return "unknown";
12588 	}
12589 }
12590
12591 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
12592 {
12593 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12594 strcpy(str, "PCI Express");
12595 return str;
12596 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12597 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
12598
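		/* The low CLOCK_CTRL bits encode the PCI-X clock; boards with
		 * the 5704 CIOBE ID always run at 133MHz.
		 */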
12599 strcpy(str, "PCIX:");
12600
12601 if ((clock_ctrl == 7) ||
12602 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
12603 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
12604 strcat(str, "133MHz");
12605 else if (clock_ctrl == 0)
12606 strcat(str, "33MHz");
12607 else if (clock_ctrl == 2)
12608 strcat(str, "50MHz");
12609 else if (clock_ctrl == 4)
12610 strcat(str, "66MHz");
12611 else if (clock_ctrl == 6)
12612 strcat(str, "100MHz");
12613 } else {
12614 strcpy(str, "PCI:");
12615 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
12616 strcat(str, "66MHz");
12617 else
12618 strcat(str, "33MHz");
12619 }
12620 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
12621 strcat(str, ":32-bit");
12622 else
12623 strcat(str, ":64-bit");
12624 return str;
12625 }
12626
12627 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
12628 {
12629 struct pci_dev *peer;
12630 unsigned int func, devnr = tp->pdev->devfn & ~7;
12631
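	/* Scan the other functions in this slot for the second port of a
	 * dual-port (e.g. 5704) device.
	 */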
12632 for (func = 0; func < 8; func++) {
12633 peer = pci_get_slot(tp->pdev->bus, devnr | func);
12634 if (peer && peer != tp->pdev)
12635 break;
12636 pci_dev_put(peer);
12637 }
12638 /* 5704 can be configured in single-port mode, set peer to
12639 * tp->pdev in that case.
12640 */
12641 if (!peer) {
12642 peer = tp->pdev;
12643 return peer;
12644 }
12645
12646 /*
12647 * We don't need to keep the refcount elevated; there's no way
12648 	 * to remove one half of this device without removing the other.
12649 */
12650 pci_dev_put(peer);
12651
12652 return peer;
12653 }
12654
12655 static void __devinit tg3_init_coal(struct tg3 *tp)
12656 {
12657 struct ethtool_coalesce *ec = &tp->coal;
12658
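	/* Start from the driver's default ("low" ticks/frames) coalescing
	 * parameters; they can be changed at runtime via ethtool -C.
	 */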
12659 memset(ec, 0, sizeof(*ec));
12660 ec->cmd = ETHTOOL_GCOALESCE;
12661 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
12662 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
12663 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
12664 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
12665 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
12666 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
12667 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
12668 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
12669 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
12670
12671 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
12672 HOSTCC_MODE_CLRTICK_TXBD)) {
12673 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
12674 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
12675 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
12676 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
12677 }
12678
12679 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12680 ec->rx_coalesce_usecs_irq = 0;
12681 ec->tx_coalesce_usecs_irq = 0;
12682 ec->stats_block_coalesce_usecs = 0;
12683 }
12684 }
12685
12686 static int __devinit tg3_init_one(struct pci_dev *pdev,
12687 const struct pci_device_id *ent)
12688 {
12689 	static int tg3_version_printed;
12690 resource_size_t tg3reg_base;
12691 unsigned long tg3reg_len;
12692 struct net_device *dev;
12693 struct tg3 *tp;
12694 int err, pm_cap;
12695 char str[40];
12696 u64 dma_mask, persist_dma_mask;
12697 DECLARE_MAC_BUF(mac);
12698
12699 if (tg3_version_printed++ == 0)
12700 printk(KERN_INFO "%s", version);
12701
12702 err = pci_enable_device(pdev);
12703 if (err) {
12704 printk(KERN_ERR PFX "Cannot enable PCI device, "
12705 "aborting.\n");
12706 return err;
12707 }
12708
12709 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12710 printk(KERN_ERR PFX "Cannot find proper PCI device "
12711 "base address, aborting.\n");
12712 err = -ENODEV;
12713 goto err_out_disable_pdev;
12714 }
12715
12716 err = pci_request_regions(pdev, DRV_MODULE_NAME);
12717 if (err) {
12718 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
12719 "aborting.\n");
12720 goto err_out_disable_pdev;
12721 }
12722
12723 pci_set_master(pdev);
12724
12725 /* Find power-management capability. */
12726 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12727 if (pm_cap == 0) {
12728 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
12729 "aborting.\n");
12730 err = -EIO;
12731 goto err_out_free_res;
12732 }
12733
12734 tg3reg_base = pci_resource_start(pdev, 0);
12735 tg3reg_len = pci_resource_len(pdev, 0);
12736
12737 dev = alloc_etherdev(sizeof(*tp));
12738 if (!dev) {
12739 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
12740 err = -ENOMEM;
12741 goto err_out_free_res;
12742 }
12743
12744 SET_NETDEV_DEV(dev, &pdev->dev);
12745
12746 #if TG3_VLAN_TAG_USED
12747 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
12748 dev->vlan_rx_register = tg3_vlan_rx_register;
12749 #endif
12750
12751 tp = netdev_priv(dev);
12752 tp->pdev = pdev;
12753 tp->dev = dev;
12754 tp->pm_cap = pm_cap;
12755 tp->mac_mode = TG3_DEF_MAC_MODE;
12756 tp->rx_mode = TG3_DEF_RX_MODE;
12757 tp->tx_mode = TG3_DEF_TX_MODE;
12758
12759 if (tg3_debug > 0)
12760 tp->msg_enable = tg3_debug;
12761 else
12762 tp->msg_enable = TG3_DEF_MSG_ENABLE;
12763
12764 /* The word/byte swap controls here control register access byte
12765 * swapping. DMA data byte swapping is controlled in the GRC_MODE
12766 * setting below.
12767 */
12768 tp->misc_host_ctrl =
12769 MISC_HOST_CTRL_MASK_PCI_INT |
12770 MISC_HOST_CTRL_WORD_SWAP |
12771 MISC_HOST_CTRL_INDIR_ACCESS |
12772 MISC_HOST_CTRL_PCISTATE_RW;
12773
12774 /* The NONFRM (non-frame) byte/word swap controls take effect
12775 * on descriptor entries, anything which isn't packet data.
12776 *
12777 * The StrongARM chips on the board (one for tx, one for rx)
12778 * are running in big-endian mode.
12779 */
12780 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
12781 GRC_MODE_WSWAP_NONFRM_DATA);
12782 #ifdef __BIG_ENDIAN
12783 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
12784 #endif
12785 spin_lock_init(&tp->lock);
12786 spin_lock_init(&tp->indirect_lock);
12787 INIT_WORK(&tp->reset_task, tg3_reset_task);
12788
12789 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
12790 if (!tp->regs) {
12791 printk(KERN_ERR PFX "Cannot map device registers, "
12792 "aborting.\n");
12793 err = -ENOMEM;
12794 goto err_out_free_dev;
12795 }
12796
12797 tg3_init_link_config(tp);
12798
12799 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
12800 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
12801 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
12802
12803 dev->open = tg3_open;
12804 dev->stop = tg3_close;
12805 dev->get_stats = tg3_get_stats;
12806 dev->set_multicast_list = tg3_set_rx_mode;
12807 dev->set_mac_address = tg3_set_mac_addr;
12808 dev->do_ioctl = tg3_ioctl;
12809 dev->tx_timeout = tg3_tx_timeout;
12810 netif_napi_add(dev, &tp->napi, tg3_poll, 64);
12811 dev->ethtool_ops = &tg3_ethtool_ops;
12812 dev->watchdog_timeo = TG3_TX_TIMEOUT;
12813 dev->change_mtu = tg3_change_mtu;
12814 dev->irq = pdev->irq;
12815 #ifdef CONFIG_NET_POLL_CONTROLLER
12816 dev->poll_controller = tg3_poll_controller;
12817 #endif
12818
12819 err = tg3_get_invariants(tp);
12820 if (err) {
12821 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
12822 "aborting.\n");
12823 goto err_out_iounmap;
12824 }
12825
12826 /* The EPB bridge inside 5714, 5715, and 5780 and any
12827 * device behind the EPB cannot support DMA addresses > 40-bit.
12828 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
12829 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
12830 * do DMA address check in tg3_start_xmit().
12831 */
12832 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
12833 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
12834 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
12835 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
12836 #ifdef CONFIG_HIGHMEM
12837 dma_mask = DMA_64BIT_MASK;
12838 #endif
12839 } else
12840 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
12841
12842 /* Configure DMA attributes. */
12843 if (dma_mask > DMA_32BIT_MASK) {
12844 err = pci_set_dma_mask(pdev, dma_mask);
12845 if (!err) {
12846 dev->features |= NETIF_F_HIGHDMA;
12847 err = pci_set_consistent_dma_mask(pdev,
12848 persist_dma_mask);
12849 if (err < 0) {
12850 printk(KERN_ERR PFX "Unable to obtain 64 bit "
12851 "DMA for consistent allocations\n");
12852 goto err_out_iounmap;
12853 }
12854 }
12855 }
12856 if (err || dma_mask == DMA_32BIT_MASK) {
12857 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
12858 if (err) {
12859 printk(KERN_ERR PFX "No usable DMA configuration, "
12860 "aborting.\n");
12861 goto err_out_iounmap;
12862 }
12863 }
12864
12865 tg3_init_bufmgr_config(tp);
12866
12867 	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12868 		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
12869 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12871 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12872 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
12873 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12874 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
12875 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
12876 } else {
12877 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
12878 }
12879
12880 /* TSO is on by default on chips that support hardware TSO.
12881 * Firmware TSO on older chips gives lower performance, so it
12882 * is off by default, but can be enabled using ethtool.
12883 */
12884 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12885 dev->features |= NETIF_F_TSO;
12886 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
12887 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
12888 dev->features |= NETIF_F_TSO6;
12889 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12890 dev->features |= NETIF_F_TSO_ECN;
12891 }
12892 
12894 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
12895 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
12896 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
12897 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
12898 tp->rx_pending = 63;
12899 }
12900
12901 err = tg3_get_device_address(tp);
12902 if (err) {
12903 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
12904 "aborting.\n");
12905 goto err_out_iounmap;
12906 }
12907
12908 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12909 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12910 printk(KERN_ERR PFX "Cannot find proper PCI device "
12911 "base address for APE, aborting.\n");
12912 err = -ENODEV;
12913 goto err_out_iounmap;
12914 }
12915
12916 tg3reg_base = pci_resource_start(pdev, 2);
12917 tg3reg_len = pci_resource_len(pdev, 2);
12918
12919 tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
12920 if (!tp->aperegs) {
12921 printk(KERN_ERR PFX "Cannot map APE registers, "
12922 "aborting.\n");
12923 err = -ENOMEM;
12924 goto err_out_iounmap;
12925 }
12926
12927 tg3_ape_lock_init(tp);
12928 }
12929
12930 /*
12931 	 * Reset the chip in case a UNDI or EFI driver did not shut it
12932 	 * down cleanly; otherwise the DMA self test will enable the WDMAC
12933 	 * and we'll see (spurious) pending DMA on the PCI bus at that point.
12934 */
12935 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
12936 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
12937 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
12938 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12939 }
12940
12941 err = tg3_test_dma(tp);
12942 if (err) {
12943 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
12944 goto err_out_apeunmap;
12945 }
12946
12947 	/* Tigon3 can only checksum IPv4 (newer chips also handle IPv6),
12948 	 * and some chips have buggy checksumming.
12949 */
12950 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
12951 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
12952 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12953 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12954 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12955 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12956 dev->features |= NETIF_F_IPV6_CSUM;
12957
12958 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
12959 } else
12960 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
12961
12962 /* flow control autonegotiation is default behavior */
12963 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
12964 tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
12965
12966 tg3_init_coal(tp);
12967
12968 pci_set_drvdata(pdev, dev);
12969
12970 err = register_netdev(dev);
12971 if (err) {
12972 printk(KERN_ERR PFX "Cannot register net device, "
12973 "aborting.\n");
12974 goto err_out_apeunmap;
12975 }
12976
12977 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] "
12978 "(%s) %s Ethernet %s\n",
12979 dev->name,
12980 tp->board_part_number,
12981 tp->pci_chip_rev_id,
12982 tg3_phy_string(tp),
12983 tg3_bus_string(tp, str),
12984 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
12985 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
12986 "10/100/1000Base-T")),
12987 print_mac(mac, dev->dev_addr));
12988
12989 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
12990 "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
12991 dev->name,
12992 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
12993 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
12994 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
12995 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
12996 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
12997 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
12998 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
12999 dev->name, tp->dma_rwctrl,
13000 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
13001 	       ((pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
13002
13003 return 0;
13004
13005 err_out_apeunmap:
13006 if (tp->aperegs) {
13007 iounmap(tp->aperegs);
13008 tp->aperegs = NULL;
13009 }
13010
13011 err_out_iounmap:
13012 if (tp->regs) {
13013 iounmap(tp->regs);
13014 tp->regs = NULL;
13015 }
13016
13017 err_out_free_dev:
13018 free_netdev(dev);
13019
13020 err_out_free_res:
13021 pci_release_regions(pdev);
13022
13023 err_out_disable_pdev:
13024 pci_disable_device(pdev);
13025 pci_set_drvdata(pdev, NULL);
13026 return err;
13027 }
13028
13029 static void __devexit tg3_remove_one(struct pci_dev *pdev)
13030 {
13031 struct net_device *dev = pci_get_drvdata(pdev);
13032
13033 if (dev) {
13034 struct tg3 *tp = netdev_priv(dev);
13035
13036 flush_scheduled_work();
13037 unregister_netdev(dev);
13038 if (tp->aperegs) {
13039 iounmap(tp->aperegs);
13040 tp->aperegs = NULL;
13041 }
13042 if (tp->regs) {
13043 iounmap(tp->regs);
13044 tp->regs = NULL;
13045 }
13046 free_netdev(dev);
13047 pci_release_regions(pdev);
13048 pci_disable_device(pdev);
13049 pci_set_drvdata(pdev, NULL);
13050 }
13051 }
13052
13053 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13054 {
13055 struct net_device *dev = pci_get_drvdata(pdev);
13056 struct tg3 *tp = netdev_priv(dev);
13057 int err;
13058
13059 /* PCI register 4 needs to be saved whether netif_running() or not.
13060 * MSI address and data need to be saved if using MSI and
13061 * netif_running().
13062 */
13063 pci_save_state(pdev);
13064
13065 if (!netif_running(dev))
13066 return 0;
13067
13068 flush_scheduled_work();
13069 tg3_netif_stop(tp);
13070
13071 del_timer_sync(&tp->timer);
13072
13073 tg3_full_lock(tp, 1);
13074 tg3_disable_ints(tp);
13075 tg3_full_unlock(tp);
13076
13077 netif_device_detach(dev);
13078
13079 tg3_full_lock(tp, 0);
13080 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13081 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
13082 tg3_full_unlock(tp);
13083
13084 err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
13085 if (err) {
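		/* The power-state change failed; restart the hardware and
		 * resume normal operation so the device is not left halted.
		 */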
13086 tg3_full_lock(tp, 0);
13087
13088 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13089 if (tg3_restart_hw(tp, 1))
13090 goto out;
13091
13092 tp->timer.expires = jiffies + tp->timer_offset;
13093 add_timer(&tp->timer);
13094
13095 netif_device_attach(dev);
13096 tg3_netif_start(tp);
13097
13098 out:
13099 tg3_full_unlock(tp);
13100 }
13101
13102 return err;
13103 }
13104
13105 static int tg3_resume(struct pci_dev *pdev)
13106 {
13107 struct net_device *dev = pci_get_drvdata(pdev);
13108 struct tg3 *tp = netdev_priv(dev);
13109 int err;
13110
13111 pci_restore_state(tp->pdev);
13112
13113 if (!netif_running(dev))
13114 return 0;
13115
13116 err = tg3_set_power_state(tp, PCI_D0);
13117 if (err)
13118 return err;
13119
13120 netif_device_attach(dev);
13121
13122 tg3_full_lock(tp, 0);
13123
13124 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13125 err = tg3_restart_hw(tp, 1);
13126 if (err)
13127 goto out;
13128
13129 tp->timer.expires = jiffies + tp->timer_offset;
13130 add_timer(&tp->timer);
13131
13132 tg3_netif_start(tp);
13133
13134 out:
13135 tg3_full_unlock(tp);
13136
13137 return err;
13138 }
13139
13140 static struct pci_driver tg3_driver = {
13141 .name = DRV_MODULE_NAME,
13142 .id_table = tg3_pci_tbl,
13143 .probe = tg3_init_one,
13144 .remove = __devexit_p(tg3_remove_one),
13145 .suspend = tg3_suspend,
13146 .resume = tg3_resume
13147 };
13148
13149 static int __init tg3_init(void)
13150 {
13151 return pci_register_driver(&tg3_driver);
13152 }
13153
13154 static void __exit tg3_cleanup(void)
13155 {
13156 pci_unregister_driver(&tg3_driver);
13157 }
13158
13159 module_init(tg3_init);
13160 module_exit(tg3_cleanup);